1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 /* The caprices of the preprocessor require that this be declared right here */
28 #define CREATE_TRACE_POINTS
29
30 #include "dm_services_types.h"
31 #include "dc.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "dc/dc_state.h"
42 #include "amdgpu_dm_trace.h"
43 #include "link/protocols/link_dpcd.h"
44 #include "link_service_types.h"
45 #include "link/protocols/link_dp_capability.h"
46 #include "link/protocols/link_ddc.h"
47
48 #include "amdgpu.h"
49 #include "amdgpu_display.h"
50 #include "amdgpu_ucode.h"
51 #include "atom.h"
52 #include "amdgpu_dm.h"
53 #include "amdgpu_dm_plane.h"
54 #include "amdgpu_dm_crtc.h"
55 #include "amdgpu_dm_hdcp.h"
56 #include <drm/display/drm_hdcp_helper.h>
57 #include "amdgpu_dm_wb.h"
58 #include "amdgpu_atombios.h"
59
60 #include "amd_shared.h"
61 #include "amdgpu_dm_irq.h"
62 #include "dm_helpers.h"
63 #include "amdgpu_dm_mst_types.h"
64 #if defined(CONFIG_DEBUG_FS)
65 #include "amdgpu_dm_debugfs.h"
66 #endif
67 #include "amdgpu_dm_psr.h"
68 #include "amdgpu_dm_replay.h"
69
70 #include "ivsrcid/ivsrcid_vislands30.h"
71
72 #include <linux/backlight.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/types.h>
76 #include <linux/pm_runtime.h>
77 #include <linux/pci.h>
78 #include <linux/power_supply.h>
79 #include <linux/firmware.h>
80 #include <linux/component.h>
81 #include <linux/sort.h>
82
83 #include <drm/drm_privacy_screen_consumer.h>
84 #include <drm/display/drm_dp_mst_helper.h>
85 #include <drm/display/drm_hdmi_helper.h>
86 #include <drm/drm_atomic.h>
87 #include <drm/drm_atomic_uapi.h>
88 #include <drm/drm_atomic_helper.h>
89 #include <drm/drm_blend.h>
90 #include <drm/drm_fixed.h>
91 #include <drm/drm_fourcc.h>
92 #include <drm/drm_edid.h>
93 #include <drm/drm_eld.h>
94 #include <drm/drm_utils.h>
95 #include <drm/drm_vblank.h>
96 #include <drm/drm_audio_component.h>
97 #include <drm/drm_gem_atomic_helper.h>
98
99 #include <media/cec-notifier.h>
100 #include <acpi/video.h>
101
102 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
103
104 #include "modules/inc/mod_freesync.h"
105 #include "modules/power/power_helpers.h"
106
107 static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
108
109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
131
132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
136
137 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
139
140 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
142
143 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
144 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
145
146 #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
147 MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
148
149 #define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
150 MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);
151
152 #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
153 MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
154
155 /* Number of bytes in PSP header for firmware. */
156 #define PSP_HEADER_BYTES 0x100
157
158 /* Number of bytes in PSP footer for firmware. */
159 #define PSP_FOOTER_BYTES 0x100
160
161 /**
162 * DOC: overview
163 *
164 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
165 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
166 * requests into DC requests, and DC responses into DRM responses.
167 *
168 * The root control structure is &struct amdgpu_display_manager.
169 */
170
171 /* basic init/fini API */
172 static int amdgpu_dm_init(struct amdgpu_device *adev);
173 static void amdgpu_dm_fini(struct amdgpu_device *adev);
174 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
175 static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
176 static struct amdgpu_i2c_adapter *
177 create_i2c(struct ddc_service *ddc_service, bool oem);
178
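/*
 * Map the dongle type reported in the link's DPCD caps to the
 * corresponding DRM subconnector type exposed to userspace.
 */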
179 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
180 {
181 switch (link->dpcd_caps.dongle_type) {
182 case DISPLAY_DONGLE_NONE:
183 return DRM_MODE_SUBCONNECTOR_Native;
184 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
185 return DRM_MODE_SUBCONNECTOR_VGA;
186 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
187 case DISPLAY_DONGLE_DP_DVI_DONGLE:
188 return DRM_MODE_SUBCONNECTOR_DVID;
189 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
190 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
191 return DRM_MODE_SUBCONNECTOR_HDMIA;
192 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
193 default:
194 return DRM_MODE_SUBCONNECTOR_Unknown;
195 }
196 }
197
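/*
 * Refresh the DRM "subconnector" property of a DisplayPort connector
 * based on the dongle type of the currently attached sink, if any.
 */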
198 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
199 {
200 struct dc_link *link = aconnector->dc_link;
201 struct drm_connector *connector = &aconnector->base;
202 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
203
204 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
205 return;
206
207 if (aconnector->dc_sink)
208 subconnector = get_subconnector_type(link);
209
210 drm_object_property_set_value(&connector->base,
211 connector->dev->mode_config.dp_subconnector_property,
212 subconnector);
213 }
214
215 /*
216 * Initializes drm_device display-related structures, based on the information
217 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
218 * drm_encoder, drm_mode_config
219 *
220 * Returns 0 on success
221 */
222 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
223 /* removes and deallocates the drm structures, created by the above function */
224 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
225
226 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
227 struct amdgpu_dm_connector *amdgpu_dm_connector,
228 u32 link_index,
229 struct amdgpu_encoder *amdgpu_encoder);
230 static int amdgpu_dm_encoder_init(struct drm_device *dev,
231 struct amdgpu_encoder *aencoder,
232 uint32_t link_index);
233
234 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
235
236 static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
237 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
238
239 static int amdgpu_dm_atomic_check(struct drm_device *dev,
240 struct drm_atomic_state *state);
241
242 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
243 static void handle_hpd_rx_irq(void *param);
244
245 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
246 int bl_idx,
247 u32 user_brightness);
248
249 static bool
250 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
251 struct drm_crtc_state *new_crtc_state);
252 /*
253 * dm_vblank_get_counter
254 *
255 * @brief
256 * Get counter for number of vertical blanks
257 *
258 * @param
259 * struct amdgpu_device *adev - [in] desired amdgpu device
260 * int disp_idx - [in] which CRTC to get the counter from
261 *
262 * @return
263 * Counter for vertical blanks
264 */
265 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
266 {
267 struct amdgpu_crtc *acrtc = NULL;
268
269 if (crtc >= adev->mode_info.num_crtc)
270 return 0;
271
272 acrtc = adev->mode_info.crtcs[crtc];
273
274 if (!acrtc->dm_irq_params.stream) {
275 drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
276 crtc);
277 return 0;
278 }
279
280 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
281 }
282
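/*
 * Query the current scanout position for a CRTC. Packs vertical blank
 * start/end into *vbl and the horizontal/vertical position into *position,
 * in the register-style format expected by the base driver.
 */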
283 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
284 u32 *vbl, u32 *position)
285 {
286 u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
287 struct amdgpu_crtc *acrtc = NULL;
288 struct dc *dc = adev->dm.dc;
289
290 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
291 return -EINVAL;
292
293 acrtc = adev->mode_info.crtcs[crtc];
294
295 if (!acrtc->dm_irq_params.stream) {
296 drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
297 crtc);
298 return 0;
299 }
300
301 if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
302 dc_allow_idle_optimizations(dc, false);
303
304 /*
305 * TODO rework base driver to use values directly.
306 * for now parse it back into reg-format
307 */
308 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
309 &v_blank_start,
310 &v_blank_end,
311 &h_position,
312 &v_position);
313
314 *position = v_position | (h_position << 16);
315 *vbl = v_blank_start | (v_blank_end << 16);
316
317 return 0;
318 }
319
320 static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
321 {
322 /* XXX todo */
323 return true;
324 }
325
326 static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
327 {
328 /* XXX todo */
329 return 0;
330 }
331
332 static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
333 {
334 return false;
335 }
336
337 static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
338 {
339 /* XXX todo */
340 return 0;
341 }
342
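/*
 * Look up the amdgpu_crtc driven by the given OTG instance. Falls back to
 * the first CRTC (with a warning) if the instance is -1.
 */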
343 static struct amdgpu_crtc *
344 get_crtc_by_otg_inst(struct amdgpu_device *adev,
345 int otg_inst)
346 {
347 struct drm_device *dev = adev_to_drm(adev);
348 struct drm_crtc *crtc;
349 struct amdgpu_crtc *amdgpu_crtc;
350
351 if (WARN_ON(otg_inst == -1))
352 return adev->mode_info.crtcs[0];
353
354 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
355 amdgpu_crtc = to_amdgpu_crtc(crtc);
356
357 if (amdgpu_crtc->otg_inst == otg_inst)
358 return amdgpu_crtc;
359 }
360
361 return NULL;
362 }
363
364 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
365 struct dm_crtc_state *new_state)
366 {
367 if (new_state->stream->adjust.timing_adjust_pending)
368 return true;
369 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
370 return true;
371 else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
372 return true;
373 else
374 return false;
375 }
376
377 /*
378 * DC will program planes with their z-order determined by their ordering
379 * in the dc_surface_updates array. This comparator is used to sort them
380 * by descending zpos.
381 */
382 static int dm_plane_layer_index_cmp(const void *a, const void *b)
383 {
384 const struct dc_surface_update *sa = (struct dc_surface_update *)a;
385 const struct dc_surface_update *sb = (struct dc_surface_update *)b;
386
387 /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
388 return sb->surface->layer_index - sa->surface->layer_index;
389 }
390
391 /**
392 * update_planes_and_stream_adapter() - Send planes to be updated in DC
393 *
394 * DC has a generic way to update planes and stream via
395 * dc_update_planes_and_stream function; however, DM might need some
396 * adjustments and preparation before calling it. This function is a wrapper
397 * for the dc_update_planes_and_stream that does any required configuration
398 * before passing control to DC.
399 *
400 * @dc: Display Core control structure
401 * @update_type: specify whether it is FULL/MEDIUM/FAST update
402 * @planes_count: planes count to update
403 * @stream: stream state
404 * @stream_update: stream update
405 * @array_of_surface_update: dc surface update pointer
406 *
407 */
408 static inline bool update_planes_and_stream_adapter(struct dc *dc,
409 int update_type,
410 int planes_count,
411 struct dc_stream_state *stream,
412 struct dc_stream_update *stream_update,
413 struct dc_surface_update *array_of_surface_update)
414 {
415 sort(array_of_surface_update, planes_count,
416 sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
417
418 /*
419 * Previous frame finished and HW is ready for optimization.
420 */
421 dc_post_update_surfaces_to_stream(dc);
422
423 return dc_update_planes_and_stream(dc,
424 array_of_surface_update,
425 planes_count,
426 stream,
427 stream_update);
428 }
429
430 /**
431 * dm_pflip_high_irq() - Handle pageflip interrupt
432 * @interrupt_params: ignored
433 *
434 * Handles the pageflip interrupt by notifying all interested parties
435 * that the pageflip has been completed.
436 */
437 static void dm_pflip_high_irq(void *interrupt_params)
438 {
439 struct amdgpu_crtc *amdgpu_crtc;
440 struct common_irq_params *irq_params = interrupt_params;
441 struct amdgpu_device *adev = irq_params->adev;
442 struct drm_device *dev = adev_to_drm(adev);
443 unsigned long flags;
444 struct drm_pending_vblank_event *e;
445 u32 vpos, hpos, v_blank_start, v_blank_end;
446 bool vrr_active;
447
448 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
449
450 /* IRQ could occur when in initial stage */
451 /* TODO work and BO cleanup */
452 if (amdgpu_crtc == NULL) {
453 drm_dbg_state(dev, "CRTC is null, returning.\n");
454 return;
455 }
456
457 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
458
459 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
460 drm_dbg_state(dev,
461 "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
462 amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
463 amdgpu_crtc->crtc_id, amdgpu_crtc);
464 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
465 return;
466 }
467
468 /* page flip completed. */
469 e = amdgpu_crtc->event;
470 amdgpu_crtc->event = NULL;
471
472 WARN_ON(!e);
473
474 vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
475
476 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
477 if (!vrr_active ||
478 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
479 &v_blank_end, &hpos, &vpos) ||
480 (vpos < v_blank_start)) {
481 /* Update to correct count and vblank timestamp if racing with
482 * vblank irq. This also updates to the correct vblank timestamp
483 * even in VRR mode, as scanout is past the front-porch atm.
484 */
485 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
486
487 /* Wake up userspace by sending the pageflip event with proper
488 * count and timestamp of vblank of flip completion.
489 */
490 if (e) {
491 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
492
493 /* Event sent, so done with vblank for this flip */
494 drm_crtc_vblank_put(&amdgpu_crtc->base);
495 }
496 } else if (e) {
497 /* VRR active and inside front-porch: vblank count and
498 * timestamp for pageflip event will only be up to date after
499 * drm_crtc_handle_vblank() has been executed from late vblank
500 * irq handler after start of back-porch (vline 0). We queue the
501 * pageflip event for send-out by drm_crtc_handle_vblank() with
502 * updated timestamp and count, once it runs after us.
503 *
504 * We need to open-code this instead of using the helper
505 * drm_crtc_arm_vblank_event(), as that helper would
506 * call drm_crtc_accurate_vblank_count(), which we must
507 * not call in VRR mode while we are in front-porch!
508 */
509
510 /* sequence will be replaced by real count during send-out. */
511 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
512 e->pipe = amdgpu_crtc->crtc_id;
513
514 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
515 e = NULL;
516 }
517
518 /* Keep track of vblank of this flip for flip throttling. We use the
519 * cooked hw counter, as that one is incremented at the start of this vblank
520 * of pageflip completion, so last_flip_vblank is the forbidden count
521 * for queueing new pageflips if vsync + VRR is enabled.
522 */
523 amdgpu_crtc->dm_irq_params.last_flip_vblank =
524 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
525
526 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
527 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
528
529 drm_dbg_state(dev,
530 "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
531 amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
532 }
533
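/*
 * Deferred work handler that applies a vmin/vmax (refresh range) update to
 * a stream under the DC lock, then drops the stream reference and frees the
 * data taken when the work item was scheduled.
 */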
534 static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
535 {
536 struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
537 struct amdgpu_device *adev = work->adev;
538 struct dc_stream_state *stream = work->stream;
539 struct dc_crtc_timing_adjust *adjust = work->adjust;
540
541 mutex_lock(&adev->dm.dc_lock);
542 dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
543 mutex_unlock(&adev->dm.dc_lock);
544
545 dc_stream_release(stream);
546 kfree(work->adjust);
547 kfree(work);
548 }
549
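/*
 * Queue a vmin/vmax adjustment for a stream from interrupt context. The
 * stream is retained and the adjust struct copied so the deferred work item
 * can safely apply them later under the DC lock.
 */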
550 static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
551 struct dc_stream_state *stream,
552 struct dc_crtc_timing_adjust *adjust)
553 {
554 struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
555 if (!offload_work) {
556 drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
557 return;
558 }
559
560 struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
561 if (!adjust_copy) {
562 drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
563 kfree(offload_work);
564 return;
565 }
566
567 dc_stream_retain(stream);
568 memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
569
570 INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
571 offload_work->adev = adev;
572 offload_work->stream = stream;
573 offload_work->adjust = adjust_copy;
574
575 queue_work(system_wq, &offload_work->work);
576 }
577
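/*
 * VUPDATE high-IRQ handler. Tracks the measured refresh rate and, in VRR
 * mode, performs core vblank handling after the end of the front-porch,
 * plus BTR/v_update processing on pre-DCE12 ASICs.
 */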
578 static void dm_vupdate_high_irq(void *interrupt_params)
579 {
580 struct common_irq_params *irq_params = interrupt_params;
581 struct amdgpu_device *adev = irq_params->adev;
582 struct amdgpu_crtc *acrtc;
583 struct drm_device *drm_dev;
584 struct drm_vblank_crtc *vblank;
585 ktime_t frame_duration_ns, previous_timestamp;
586 unsigned long flags;
587 int vrr_active;
588
589 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
590
591 if (acrtc) {
592 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
593 drm_dev = acrtc->base.dev;
594 vblank = drm_crtc_vblank_crtc(&acrtc->base);
595 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
596 frame_duration_ns = vblank->time - previous_timestamp;
597
598 if (frame_duration_ns > 0) {
599 trace_amdgpu_refresh_rate_track(acrtc->base.index,
600 frame_duration_ns,
601 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
602 atomic64_set(&irq_params->previous_timestamp, vblank->time);
603 }
604
605 drm_dbg_vbl(drm_dev,
606 "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
607 vrr_active);
608
609 /* Core vblank handling is done here after end of front-porch in
610 * vrr mode, as vblank timestamping will give valid results now
611 * that we are past the front-porch. This will also deliver
612 * page-flip completion events that have been queued to us
613 * if a pageflip happened inside front-porch.
614 */
615 if (vrr_active && acrtc->dm_irq_params.stream) {
616 bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
617 bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
618 bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
619 == VRR_STATE_ACTIVE_VARIABLE;
620
621 amdgpu_dm_crtc_handle_vblank(acrtc);
622
623 /* BTR processing for pre-DCE12 ASICs */
624 if (adev->family < AMDGPU_FAMILY_AI) {
625 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
626 mod_freesync_handle_v_update(
627 adev->dm.freesync_module,
628 acrtc->dm_irq_params.stream,
629 &acrtc->dm_irq_params.vrr_params);
630
631 if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
632 schedule_dc_vmin_vmax(adev,
633 acrtc->dm_irq_params.stream,
634 &acrtc->dm_irq_params.vrr_params.adjust);
635 }
636 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
637 }
638 }
639 }
640 }
641
642 /**
643 * dm_crtc_high_irq() - Handles CRTC interrupt
644 * @interrupt_params: used for determining the CRTC instance
645 *
646 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
647 * event handler.
648 */
649 static void dm_crtc_high_irq(void *interrupt_params)
650 {
651 struct common_irq_params *irq_params = interrupt_params;
652 struct amdgpu_device *adev = irq_params->adev;
653 struct drm_writeback_job *job;
654 struct amdgpu_crtc *acrtc;
655 unsigned long flags;
656 int vrr_active;
657
658 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
659 if (!acrtc)
660 return;
661
662 if (acrtc->wb_conn) {
663 spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
664
665 if (acrtc->wb_pending) {
666 job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
667 struct drm_writeback_job,
668 list_entry);
669 acrtc->wb_pending = false;
670 spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
671
672 if (job) {
673 unsigned int v_total, refresh_hz;
674 struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
675
676 v_total = stream->adjust.v_total_max ?
677 stream->adjust.v_total_max : stream->timing.v_total;
678 refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
679 100LL, (v_total * stream->timing.h_total));
680 mdelay(1000 / refresh_hz);
681
682 drm_writeback_signal_completion(acrtc->wb_conn, 0);
683 dc_stream_fc_disable_writeback(adev->dm.dc,
684 acrtc->dm_irq_params.stream, 0);
685 }
686 } else
687 spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
688 }
689
690 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
691
692 drm_dbg_vbl(adev_to_drm(adev),
693 "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
694 vrr_active, acrtc->dm_irq_params.active_planes);
695
696 /**
697 * Core vblank handling at start of front-porch is only possible
698 * in non-vrr mode, as only then will vblank timestamping give
699 * valid results while done in the front-porch. Otherwise defer it
700 * to dm_vupdate_high_irq after end of front-porch.
701 */
702 if (!vrr_active)
703 amdgpu_dm_crtc_handle_vblank(acrtc);
704
705 /**
706 * Following stuff must happen at start of vblank, for crc
707 * computation and below-the-range btr support in vrr mode.
708 */
709 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
710
711 /* BTR updates need to happen before VUPDATE on Vega and above. */
712 if (adev->family < AMDGPU_FAMILY_AI)
713 return;
714
715 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
716
717 if (acrtc->dm_irq_params.stream &&
718 acrtc->dm_irq_params.vrr_params.supported) {
719 bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
720 bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
721 bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
722
723 mod_freesync_handle_v_update(adev->dm.freesync_module,
724 acrtc->dm_irq_params.stream,
725 &acrtc->dm_irq_params.vrr_params);
726
727 /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
728 if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
729 schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
730 &acrtc->dm_irq_params.vrr_params.adjust);
731 }
732 }
733
734 /*
735 * If there aren't any active_planes then DCH HUBP may be clock-gated.
736 * In that case, pageflip completion interrupts won't fire and pageflip
737 * completion events won't get delivered. Prevent this by sending
738 * pending pageflip events from here if a flip is still pending.
739 *
740 * If any planes are enabled, use dm_pflip_high_irq() instead, to
741 * avoid race conditions between flip programming and completion,
742 * which could cause too early flip completion events.
743 */
744 if (adev->family >= AMDGPU_FAMILY_RV &&
745 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
746 acrtc->dm_irq_params.active_planes == 0) {
747 if (acrtc->event) {
748 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
749 acrtc->event = NULL;
750 drm_crtc_vblank_put(&acrtc->base);
751 }
752 acrtc->pflip_status = AMDGPU_FLIP_NONE;
753 }
754
755 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
756 }
757
758 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
759 /**
760 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
761 * DCN generation ASICs
762 * @interrupt_params: interrupt parameters
763 *
764 * Used to set crc window/read out crc value at vertical line 0 position
765 */
766 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
767 {
768 struct common_irq_params *irq_params = interrupt_params;
769 struct amdgpu_device *adev = irq_params->adev;
770 struct amdgpu_crtc *acrtc;
771
772 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
773
774 if (!acrtc)
775 return;
776
777 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
778 }
779 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
780
781 /**
782 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
783 * @adev: amdgpu_device pointer
784 * @notify: dmub notification structure
785 *
786 * Dmub AUX or SET_CONFIG command completion processing callback
787 * Copies the dmub notification to DM, to be read by the AUX command
788 * issuing thread, and signals the event to wake up that thread.
789 */
790 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
791 struct dmub_notification *notify)
792 {
793 if (adev->dm.dmub_notify)
794 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
795 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
796 complete(&adev->dm.dmub_aux_transfer_done);
797 }
798
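/*
 * DMUB fused I/O completion callback. Copies the fused request reply into
 * the per-DDC-line sync buffer and signals the waiting thread.
 */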
799 static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
800 struct dmub_notification *notify)
801 {
802 if (!adev || !notify) {
803 ASSERT(false);
804 return;
805 }
806
807 const struct dmub_cmd_fused_request *req = ¬ify->fused_request;
808 const uint8_t ddc_line = req->u.aux.ddc_line;
809
810 if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
811 ASSERT(false);
812 return;
813 }
814
815 struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
816
817 static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
818 memcpy(sync->reply_data, req, sizeof(*req));
819 complete(&sync->replied);
820 }
821
822 /**
823 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
824 * @adev: amdgpu_device pointer
825 * @notify: dmub notification structure
826 *
827 * Dmub Hpd interrupt processing callback. Gets the display index through the
828 * link index and calls the helper to do the processing.
829 */
830 static void dmub_hpd_callback(struct amdgpu_device *adev,
831 struct dmub_notification *notify)
832 {
833 struct amdgpu_dm_connector *aconnector;
834 struct amdgpu_dm_connector *hpd_aconnector = NULL;
835 struct drm_connector *connector;
836 struct drm_connector_list_iter iter;
837 struct dc_link *link;
838 u8 link_index = 0;
839 struct drm_device *dev;
840
841 if (adev == NULL)
842 return;
843
844 if (notify == NULL) {
845 drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
846 return;
847 }
848
849 if (notify->link_index > adev->dm.dc->link_count) {
850 drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
851 return;
852 }
853
854 /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
855 if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
856 drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
857 return;
858 }
859
860 link_index = notify->link_index;
861 link = adev->dm.dc->links[link_index];
862 dev = adev->dm.ddev;
863
864 drm_connector_list_iter_begin(dev, &iter);
865 drm_for_each_connector_iter(connector, &iter) {
866
867 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
868 continue;
869
870 aconnector = to_amdgpu_dm_connector(connector);
871 if (link && aconnector->dc_link == link) {
872 if (notify->type == DMUB_NOTIFICATION_HPD)
873 drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
874 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
875 drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
876 else
877 drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
878 notify->type, link_index);
879
880 hpd_aconnector = aconnector;
881 break;
882 }
883 }
884 drm_connector_list_iter_end(&iter);
885
886 if (hpd_aconnector) {
887 if (notify->type == DMUB_NOTIFICATION_HPD) {
888 if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
889 drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
890 handle_hpd_irq_helper(hpd_aconnector);
891 } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
892 handle_hpd_rx_irq(hpd_aconnector);
893 }
894 }
895 }
896
897 /**
898 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
899 * @adev: amdgpu_device pointer
900 * @notify: dmub notification structure
901 *
902 * HPD sense changes can occur during low power states and need to be
903 * notified from firmware to driver.
904 */
905 static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
906 struct dmub_notification *notify)
907 {
908 drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
909 }
910
911 /**
912 * register_dmub_notify_callback - Sets callback for DMUB notify
913 * @adev: amdgpu_device pointer
914 * @type: Type of dmub notification
915 * @callback: Dmub interrupt callback function
916 * @dmub_int_thread_offload: offload indicator
917 *
918 * API to register a dmub callback handler for a dmub notification.
919 * Also sets an indicator for whether callback processing is to be offloaded
920 * to the dmub interrupt handling thread.
921 * Return: true if successfully registered, false otherwise
922 */
923 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
924 enum dmub_notification_type type,
925 dmub_notify_interrupt_callback_t callback,
926 bool dmub_int_thread_offload)
927 {
928 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
929 adev->dm.dmub_callback[type] = callback;
930 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
931 } else
932 return false;
933
934 return true;
935 }
936
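/*
 * Worker for DMUB notifications whose handling was offloaded from the
 * outbox IRQ. Invokes the registered callback for the notification type,
 * then frees the notification and the work item.
 */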
937 static void dm_handle_hpd_work(struct work_struct *work)
938 {
939 struct dmub_hpd_work *dmub_hpd_wrk;
940
941 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
942
943 if (!dmub_hpd_wrk->dmub_notify) {
944 drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
945 return;
946 }
947
948 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
949 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
950 dmub_hpd_wrk->dmub_notify);
951 }
952
953 kfree(dmub_hpd_wrk->dmub_notify);
954 kfree(dmub_hpd_wrk);
955
956 }
957
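/* Human-readable name of a DMUB notification type, for log messages. */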
958 static const char *dmub_notification_type_str(enum dmub_notification_type e)
959 {
960 switch (e) {
961 case DMUB_NOTIFICATION_NO_DATA:
962 return "NO_DATA";
963 case DMUB_NOTIFICATION_AUX_REPLY:
964 return "AUX_REPLY";
965 case DMUB_NOTIFICATION_HPD:
966 return "HPD";
967 case DMUB_NOTIFICATION_HPD_IRQ:
968 return "HPD_IRQ";
969 case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
970 return "SET_CONFIG_REPLY";
971 case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
972 return "DPIA_NOTIFICATION";
973 case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
974 return "HPD_SENSE_NOTIFY";
975 case DMUB_NOTIFICATION_FUSED_IO:
976 return "FUSED_IO";
977 default:
978 return "<unknown>";
979 }
980 }
981
982 #define DMUB_TRACE_MAX_READ 64
983 /**
984 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
985 * @interrupt_params: used for determining the Outbox instance
986 *
987 * Handles the Outbox interrupt by draining DMCUB trace buffer entries
988 * and dispatching pending DMUB notifications to their registered handlers.
989 */
990 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
991 {
992 struct dmub_notification notify = {0};
993 struct common_irq_params *irq_params = interrupt_params;
994 struct amdgpu_device *adev = irq_params->adev;
995 struct amdgpu_display_manager *dm = &adev->dm;
996 struct dmcub_trace_buf_entry entry = { 0 };
997 u32 count = 0;
998 struct dmub_hpd_work *dmub_hpd_wrk;
999
1000 do {
1001 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
1002 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
1003 entry.param0, entry.param1);
1004
1005 drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
1006 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
1007 } else
1008 break;
1009
1010 count++;
1011
1012 } while (count <= DMUB_TRACE_MAX_READ);
1013
1014 if (count > DMUB_TRACE_MAX_READ)
1015 drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
1016
1017 if (dc_enable_dmub_notifications(adev->dm.dc) &&
1018 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
1019
1020 do {
1021 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
1022 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
1023 drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
1024 continue;
1025 }
1026 if (!dm->dmub_callback[notify.type]) {
1027 drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
1028 dmub_notification_type_str(notify.type));
1029 continue;
1030 }
1031 if (dm->dmub_thread_offload[notify.type] == true) {
1032 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
1033 if (!dmub_hpd_wrk) {
1034 drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
1035 return;
1036 }
1037 dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification),
1038 GFP_ATOMIC);
1039 if (!dmub_hpd_wrk->dmub_notify) {
1040 kfree(dmub_hpd_wrk);
1041 drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
1042 return;
1043 }
1044 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
1045 dmub_hpd_wrk->adev = adev;
1046 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
1047 } else {
1048 dm->dmub_callback[notify.type](adev, ¬ify);
1049 }
1050 } while (notify.pending_notification);
1051 }
1052 }
1053
1054 static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1055 enum amd_clockgating_state state)
1056 {
1057 return 0;
1058 }
1059
1060 static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
1061 enum amd_powergating_state state)
1062 {
1063 return 0;
1064 }
1065
1066 /* Prototypes of private functions */
1067 static int dm_early_init(struct amdgpu_ip_block *ip_block);
1068
1069 /* Allocate memory for FBC compressed data */
1070 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
1071 {
1072 struct amdgpu_device *adev = drm_to_adev(connector->dev);
1073 struct dm_compressor_info *compressor = &adev->dm.compressor;
1074 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
1075 struct drm_display_mode *mode;
1076 unsigned long max_size = 0;
1077
1078 if (adev->dm.dc->fbc_compressor == NULL)
1079 return;
1080
1081 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
1082 return;
1083
1084 if (compressor->bo_ptr)
1085 return;
1086
1087
1088 list_for_each_entry(mode, &connector->modes, head) {
1089 if (max_size < (unsigned long) mode->htotal * mode->vtotal)
1090 max_size = (unsigned long) mode->htotal * mode->vtotal;
1091 }
1092
1093 if (max_size) {
1094 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
1095 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
1096 &compressor->gpu_addr, &compressor->cpu_addr);
1097
1098 if (r)
1099 drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
1100 else {
1101 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
1102 drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
1103 }
1104
1105 }
1106
1107 }
1108
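/*
 * Audio component op: copy the ELD of the connector driving the given
 * audio pin (port) into the supplied buffer and report whether the pin
 * is enabled. Returns the ELD size in bytes.
 */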
1109 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
1110 int pipe, bool *enabled,
1111 unsigned char *buf, int max_bytes)
1112 {
1113 struct drm_device *dev = dev_get_drvdata(kdev);
1114 struct amdgpu_device *adev = drm_to_adev(dev);
1115 struct drm_connector *connector;
1116 struct drm_connector_list_iter conn_iter;
1117 struct amdgpu_dm_connector *aconnector;
1118 int ret = 0;
1119
1120 *enabled = false;
1121
1122 mutex_lock(&adev->dm.audio_lock);
1123
1124 drm_connector_list_iter_begin(dev, &conn_iter);
1125 drm_for_each_connector_iter(connector, &conn_iter) {
1126
1127 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
1128 continue;
1129
1130 aconnector = to_amdgpu_dm_connector(connector);
1131 if (aconnector->audio_inst != port)
1132 continue;
1133
1134 *enabled = true;
1135 mutex_lock(&connector->eld_mutex);
1136 ret = drm_eld_size(connector->eld);
1137 memcpy(buf, connector->eld, min(max_bytes, ret));
1138 mutex_unlock(&connector->eld_mutex);
1139
1140 break;
1141 }
1142 drm_connector_list_iter_end(&conn_iter);
1143
1144 mutex_unlock(&adev->dm.audio_lock);
1145
1146 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
1147
1148 return ret;
1149 }
1150
1151 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
1152 .get_eld = amdgpu_dm_audio_component_get_eld,
1153 };
1154
1155 static int amdgpu_dm_audio_component_bind(struct device *kdev,
1156 struct device *hda_kdev, void *data)
1157 {
1158 struct drm_device *dev = dev_get_drvdata(kdev);
1159 struct amdgpu_device *adev = drm_to_adev(dev);
1160 struct drm_audio_component *acomp = data;
1161
1162 acomp->ops = &amdgpu_dm_audio_component_ops;
1163 acomp->dev = kdev;
1164 adev->dm.audio_component = acomp;
1165
1166 return 0;
1167 }
1168
1169 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
1170 struct device *hda_kdev, void *data)
1171 {
1172 struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
1173 struct drm_audio_component *acomp = data;
1174
1175 acomp->ops = NULL;
1176 acomp->dev = NULL;
1177 adev->dm.audio_component = NULL;
1178 }
1179
1180 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
1181 .bind = amdgpu_dm_audio_component_bind,
1182 .unbind = amdgpu_dm_audio_component_unbind,
1183 };
1184
1185 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
1186 {
1187 int i, ret;
1188
1189 if (!amdgpu_audio)
1190 return 0;
1191
1192 adev->mode_info.audio.enabled = true;
1193
1194 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
1195
1196 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1197 adev->mode_info.audio.pin[i].channels = -1;
1198 adev->mode_info.audio.pin[i].rate = -1;
1199 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1200 adev->mode_info.audio.pin[i].status_bits = 0;
1201 adev->mode_info.audio.pin[i].category_code = 0;
1202 adev->mode_info.audio.pin[i].connected = false;
1203 adev->mode_info.audio.pin[i].id =
1204 adev->dm.dc->res_pool->audios[i]->inst;
1205 adev->mode_info.audio.pin[i].offset = 0;
1206 }
1207
1208 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1209 if (ret < 0)
1210 return ret;
1211
1212 adev->dm.audio_registered = true;
1213
1214 return 0;
1215 }
1216
1217 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
1218 {
1219 if (!amdgpu_audio)
1220 return;
1221
1222 if (!adev->mode_info.audio.enabled)
1223 return;
1224
1225 if (adev->dm.audio_registered) {
1226 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1227 adev->dm.audio_registered = false;
1228 }
1229
1230 /* TODO: Disable audio? */
1231
1232 adev->mode_info.audio.enabled = false;
1233 }
1234
1235 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1236 {
1237 struct drm_audio_component *acomp = adev->dm.audio_component;
1238
1239 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1240 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1241
1242 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1243 pin, -1);
1244 }
1245 }
1246
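/*
 * Bring up the DMUB service: reset the controller, copy the firmware and
 * VBIOS regions into framebuffer memory, program the hardware parameters,
 * wait for firmware auto-load and create the DC-side DMUB server.
 */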
1247 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1248 {
1249 const struct dmcub_firmware_header_v1_0 *hdr;
1250 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1251 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1252 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1253 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1254 struct abm *abm = adev->dm.dc->res_pool->abm;
1255 struct dc_context *ctx = adev->dm.dc->ctx;
1256 struct dmub_srv_hw_params hw_params;
1257 enum dmub_status status;
1258 const unsigned char *fw_inst_const, *fw_bss_data;
1259 u32 i, fw_inst_const_size, fw_bss_data_size;
1260 bool has_hw_support;
1261
1262 if (!dmub_srv)
1263 /* DMUB isn't supported on the ASIC. */
1264 return 0;
1265
1266 if (!fb_info) {
1267 drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
1268 return -EINVAL;
1269 }
1270
1271 if (!dmub_fw) {
1272 /* Firmware required for DMUB support. */
1273 drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
1274 return -EINVAL;
1275 }
1276
1277 /* initialize register offsets for ASICs with runtime initialization available */
1278 if (dmub_srv->hw_funcs.init_reg_offsets)
1279 dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
1280
1281 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1282 if (status != DMUB_STATUS_OK) {
1283 drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
1284 return -EINVAL;
1285 }
1286
1287 if (!has_hw_support) {
1288 drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
1289 return 0;
1290 }
1291
1292 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1293 status = dmub_srv_hw_reset(dmub_srv);
1294 if (status != DMUB_STATUS_OK)
1295 drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
1296
1297 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1298
1299 fw_inst_const = dmub_fw->data +
1300 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1301 PSP_HEADER_BYTES;
1302
1303 fw_bss_data = dmub_fw->data +
1304 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1305 le32_to_cpu(hdr->inst_const_bytes);
1306
1307 /* Copy firmware and bios info into FB memory. */
1308 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1309 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1310
1311 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1312
1313 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1314 * amdgpu_ucode_init_single_fw will load dmub firmware
1315 * fw_inst_const part to cw0; otherwise, the firmware back door load
1316 * will be done by dm_dmub_hw_init
1317 */
1318 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1319 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1320 fw_inst_const_size);
1321 }
1322
1323 if (fw_bss_data_size)
1324 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1325 fw_bss_data, fw_bss_data_size);
1326
1327 /* Copy firmware bios info into FB memory. */
1328 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1329 adev->bios_size);
1330
1331 /* Reset regions that need to be reset. */
1332 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1333 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1334
1335 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1336 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1337
1338 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1339 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1340
1341 memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
1342 fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);
1343
1344 /* Initialize hardware. */
1345 memset(&hw_params, 0, sizeof(hw_params));
1346 hw_params.fb_base = adev->gmc.fb_start;
1347 hw_params.fb_offset = adev->vm_manager.vram_base_offset;
1348
1349 /* backdoor load firmware and trigger dmub running */
1350 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1351 hw_params.load_inst_const = true;
1352
1353 if (dmcu)
1354 hw_params.psp_version = dmcu->psp_version;
1355
1356 for (i = 0; i < fb_info->num_fb; ++i)
1357 hw_params.fb[i] = &fb_info->fb[i];
1358
1359 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1360 case IP_VERSION(3, 1, 3):
1361 case IP_VERSION(3, 1, 4):
1362 case IP_VERSION(3, 5, 0):
1363 case IP_VERSION(3, 5, 1):
1364 case IP_VERSION(3, 6, 0):
1365 case IP_VERSION(4, 0, 1):
1366 hw_params.dpia_supported = true;
1367 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1368 break;
1369 default:
1370 break;
1371 }
1372
1373 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1374 case IP_VERSION(3, 5, 0):
1375 case IP_VERSION(3, 5, 1):
1376 case IP_VERSION(3, 6, 0):
1377 hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
1378 hw_params.lower_hbr3_phy_ssc = true;
1379 break;
1380 default:
1381 break;
1382 }
1383
1384 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1385 if (status != DMUB_STATUS_OK) {
1386 drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
1387 return -EINVAL;
1388 }
1389
1390 /* Wait for firmware load to finish. */
1391 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1392 if (status != DMUB_STATUS_OK)
1393 drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
1394
1395 /* Init DMCU and ABM if available. */
1396 if (dmcu && abm) {
1397 dmcu->funcs->dmcu_init(dmcu);
1398 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1399 }
1400
1401 if (!adev->dm.dc->ctx->dmub_srv)
1402 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1403 if (!adev->dm.dc->ctx->dmub_srv) {
1404 drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
1405 return -ENOMEM;
1406 }
1407
1408 drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
1409 adev->dm.dmcub_fw_version);
1410
1411 /* Keeping sanity checks off if
1412 * DCN31 >= 4.0.59.0
1413 * DCN314 >= 8.0.16.0
1414 * Otherwise, turn on sanity checks
1415 */
1416 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1417 case IP_VERSION(3, 1, 2):
1418 case IP_VERSION(3, 1, 3):
1419 if (adev->dm.dmcub_fw_version &&
1420 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
1421 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
1422 adev->dm.dc->debug.sanity_checks = true;
1423 break;
1424 case IP_VERSION(3, 1, 4):
1425 if (adev->dm.dmcub_fw_version &&
1426 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
1427 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
1428 adev->dm.dc->debug.sanity_checks = true;
1429 break;
1430 default:
1431 break;
1432 }
1433
1434 return 0;
1435 }
1436
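/*
 * Re-initialize DMUB on resume: if the hardware is still initialized, just
 * wait for firmware auto-load to finish; otherwise redo the full hardware
 * init via dm_dmub_hw_init().
 */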
1437 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1438 {
1439 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1440 enum dmub_status status;
1441 bool init;
1442 int r;
1443
1444 if (!dmub_srv) {
1445 /* DMUB isn't supported on the ASIC. */
1446 return;
1447 }
1448
1449 status = dmub_srv_is_hw_init(dmub_srv, &init);
1450 if (status != DMUB_STATUS_OK)
1451 drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
1452
1453 if (status == DMUB_STATUS_OK && init) {
1454 /* Wait for firmware load to finish. */
1455 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1456 if (status != DMUB_STATUS_OK)
1457 drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
1458 } else {
1459 /* Perform the full hardware initialization. */
1460 r = dm_dmub_hw_init(adev);
1461 if (r)
1462 drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
1463 }
1464 }
1465
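/*
 * Fill a dc_phy_addr_space_config from the GMC/MMHUB settings: system
 * aperture, AGP aperture and GART page table addresses, for DC to program
 * into the display hardware.
 */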
1466 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1467 {
1468 u64 pt_base;
1469 u32 logical_addr_low;
1470 u32 logical_addr_high;
1471 u32 agp_base, agp_bot, agp_top;
1472 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1473
1474 memset(pa_config, 0, sizeof(*pa_config));
1475
1476 agp_base = 0;
1477 agp_bot = adev->gmc.agp_start >> 24;
1478 agp_top = adev->gmc.agp_end >> 24;
1479
1480 /* AGP aperture is disabled */
1481 if (agp_bot > agp_top) {
1482 logical_addr_low = adev->gmc.fb_start >> 18;
1483 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
1484 AMD_APU_IS_RENOIR |
1485 AMD_APU_IS_GREEN_SARDINE))
1486 /*
1487 * Raven2 has a HW issue that it is unable to use the vram which
1488 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1489 * workaround that increase system aperture high address (add 1)
1490 * to get rid of the VM fault and hardware hang.
1491 */
1492 logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
1493 else
1494 logical_addr_high = adev->gmc.fb_end >> 18;
1495 } else {
1496 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1497 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
1498 AMD_APU_IS_RENOIR |
1499 AMD_APU_IS_GREEN_SARDINE))
1500 /*
1501 * Raven2 has a HW issue that it is unable to use the vram which
1502 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1503 * workaround that increase system aperture high address (add 1)
1504 * to get rid of the VM fault and hardware hang.
1505 */
1506 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1507 else
1508 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1509 }
1510
1511 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1512
1513 page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1514 AMDGPU_GPU_PAGE_SHIFT);
1515 page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1516 AMDGPU_GPU_PAGE_SHIFT);
1517 page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1518 AMDGPU_GPU_PAGE_SHIFT);
1519 page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1520 AMDGPU_GPU_PAGE_SHIFT);
1521 page_table_base.high_part = upper_32_bits(pt_base);
1522 page_table_base.low_part = lower_32_bits(pt_base);
1523
1524 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1525 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1526
1527 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1528 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1529 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1530
1531 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1532 pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
1533 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1534
1535 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1536 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1537 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1538
1539 pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
1540
1541 }
1542
1543 static void force_connector_state(
1544 struct amdgpu_dm_connector *aconnector,
1545 enum drm_connector_force force_state)
1546 {
1547 struct drm_connector *connector = &aconnector->base;
1548
1549 mutex_lock(&connector->dev->mode_config.mutex);
1550 aconnector->base.force = force_state;
1551 mutex_unlock(&connector->dev->mode_config.mutex);
1552
1553 mutex_lock(&aconnector->hpd_lock);
1554 drm_kms_helper_connector_hotplug_event(connector);
1555 mutex_unlock(&aconnector->hpd_lock);
1556 }
1557
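/*
 * Deferred handler for HPD RX (short pulse) interrupts: services MST
 * sideband messages, automated test requests and link-loss recovery
 * outside of interrupt context.
 */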
1558 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1559 {
1560 struct hpd_rx_irq_offload_work *offload_work;
1561 struct amdgpu_dm_connector *aconnector;
1562 struct dc_link *dc_link;
1563 struct amdgpu_device *adev;
1564 enum dc_connection_type new_connection_type = dc_connection_none;
1565 unsigned long flags;
1566 union test_response test_response;
1567
1568 memset(&test_response, 0, sizeof(test_response));
1569
1570 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1571 aconnector = offload_work->offload_wq->aconnector;
1572 adev = offload_work->adev;
1573
1574 if (!aconnector) {
1575 drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
1576 goto skip;
1577 }
1578
1579 dc_link = aconnector->dc_link;
1580
1581 mutex_lock(&aconnector->hpd_lock);
1582 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
1583 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
1584 mutex_unlock(&aconnector->hpd_lock);
1585
1586 if (new_connection_type == dc_connection_none)
1587 goto skip;
1588
1589 if (amdgpu_in_reset(adev))
1590 goto skip;
1591
1592 if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1593 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1594 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1595 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1596 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1597 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1598 goto skip;
1599 }
1600
1601 mutex_lock(&adev->dm.dc_lock);
1602 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1603 dc_link_dp_handle_automated_test(dc_link);
1604
1605 if (aconnector->timing_changed) {
1606 /* force connector disconnect and reconnect */
1607 force_connector_state(aconnector, DRM_FORCE_OFF);
1608 msleep(100);
1609 force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1610 }
1611
1612 test_response.bits.ACK = 1;
1613
1614 core_link_write_dpcd(
1615 dc_link,
1616 DP_TEST_RESPONSE,
1617 &test_response.raw,
1618 sizeof(test_response));
1619 } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1620 dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1621 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1622 /* offload_work->data comes from handle_hpd_rx_irq ->
1623 * schedule_hpd_rx_offload_work; this is the deferred handler
1624 * for an HPD short pulse. By this point the link status may
1625 * have changed, so read the latest link status from the DPCD
1626 * registers. If the link status is good, skip running link
1627 * training again.
1628 */
1629 union hpd_irq_data irq_data;
1630
1631 memset(&irq_data, 0, sizeof(irq_data));
1632
1633 /* Before calling dc_link_dp_handle_link_loss, allow a new link-loss
1634 * handling request to be added to the work queue in case the link is
1635 * lost again at the end of dc_link_dp_handle_link_loss.
1636 */
1637 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1638 offload_work->offload_wq->is_handling_link_loss = false;
1639 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1640
1641 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1642 dc_link_check_link_loss_status(dc_link, &irq_data))
1643 dc_link_dp_handle_link_loss(dc_link);
1644 }
1645 mutex_unlock(&adev->dm.dc_lock);
1646
1647 skip:
1648 kfree(offload_work);
1649
1650 }
1651
1652 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
1653 {
1654 struct dc *dc = adev->dm.dc;
1655 int max_caps = dc->caps.max_links;
1656 int i = 0;
1657 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1658
1659 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1660
1661 if (!hpd_rx_offload_wq)
1662 return NULL;
1663
1664
1665 for (i = 0; i < max_caps; i++) {
1666 hpd_rx_offload_wq[i].wq =
1667 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1668
1669 if (hpd_rx_offload_wq[i].wq == NULL) {
1670 drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
1671 goto out_err;
1672 }
1673
1674 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1675 }
1676
1677 return hpd_rx_offload_wq;
1678
1679 out_err:
1680 for (i = 0; i < max_caps; i++) {
1681 if (hpd_rx_offload_wq[i].wq)
1682 destroy_workqueue(hpd_rx_offload_wq[i].wq);
1683 }
1684 kfree(hpd_rx_offload_wq);
1685 return NULL;
1686 }
1687
1688 struct amdgpu_stutter_quirk {
1689 u16 chip_vendor;
1690 u16 chip_device;
1691 u16 subsys_vendor;
1692 u16 subsys_device;
1693 u8 revision;
1694 };
1695
1696 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1697 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1698 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1699 { 0, 0, 0, 0, 0 },
1700 };
1701
1702 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1703 {
1704 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1705
1706 while (p && p->chip_device != 0) {
1707 if (pdev->vendor == p->chip_vendor &&
1708 pdev->device == p->chip_device &&
1709 pdev->subsystem_vendor == p->subsys_vendor &&
1710 pdev->subsystem_device == p->subsys_device &&
1711 pdev->revision == p->revision) {
1712 return true;
1713 }
1714 ++p;
1715 }
1716 return false;
1717 }
1718
1719
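/**
 * dm_allocate_gpu_mem() - Allocate a GPU-accessible buffer for DC
 * @adev: amdgpu device
 * @type: DC_MEM_ALLOC_TYPE_GART allocates from GTT, any other type from VRAM
 * @size: size of the allocation in bytes
 * @addr: filled with the GPU address of the allocation
 *
 * The allocation is tracked on adev->dm.da_list so that it can later be
 * released with dm_free_gpu_mem().
 *
 * Illustrative use only (hypothetical caller):
 *
 *	long long gpu_addr;
 *	void *cpu_ptr = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART,
 *					    4096, &gpu_addr);
 *	if (cpu_ptr)
 *		dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, cpu_ptr);
 *
 * Return: CPU pointer to the allocation, or NULL on failure.
 */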
1720 void*
1721 dm_allocate_gpu_mem(
1722 struct amdgpu_device *adev,
1723 enum dc_gpu_mem_alloc_type type,
1724 size_t size,
1725 long long *addr)
1726 {
1727 struct dal_allocation *da;
1728 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
1729 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
1730 int ret;
1731
1732 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
1733 if (!da)
1734 return NULL;
1735
1736 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
1737 domain, &da->bo,
1738 &da->gpu_addr, &da->cpu_ptr);
1739
1740 *addr = da->gpu_addr;
1741
1742 if (ret) {
1743 kfree(da);
1744 return NULL;
1745 }
1746
1747 /* add da to list in dm */
1748 list_add(&da->list, &adev->dm.da_list);
1749
1750 return da->cpu_ptr;
1751 }
1752
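/**
 * dm_free_gpu_mem() - Free a buffer allocated with dm_allocate_gpu_mem()
 * @adev: amdgpu device
 * @type: allocation type (unused; the buffer is found by its CPU pointer)
 * @pvMem: CPU pointer previously returned by dm_allocate_gpu_mem()
 *
 * Walks adev->dm.da_list, frees the matching kernel BO and removes the
 * tracking entry. Unknown pointers are silently ignored.
 */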
1753 void
1754 dm_free_gpu_mem(
1755 struct amdgpu_device *adev,
1756 enum dc_gpu_mem_alloc_type type,
1757 void *pvMem)
1758 {
1759 struct dal_allocation *da;
1760
1761 /* walk the da list in DM */
1762 list_for_each_entry(da, &adev->dm.da_list, list) {
1763 if (pvMem == da->cpu_ptr) {
1764 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
1765 list_del(&da->list);
1766 kfree(da);
1767 break;
1768 }
1769 }
1770
1771 }
1772
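/*
 * Send a GPINT command to the VBIOS copy of DMUB: write the command word
 * with the status bit set, then poll in 1 us steps (up to timeout_us) until
 * DMUB acknowledges it by clearing the status bit while leaving the command
 * and parameter fields unchanged.
 */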
1773 static enum dmub_status
1774 dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
1775 enum dmub_gpint_command command_code,
1776 uint16_t param,
1777 uint32_t timeout_us)
1778 {
1779 union dmub_gpint_data_register reg, test;
1780 uint32_t i;
1781
1782 /* Assume that VBIOS DMUB is ready to take commands */
1783
1784 reg.bits.status = 1;
1785 reg.bits.command_code = command_code;
1786 reg.bits.param = param;
1787
1788 cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
1789
1790 for (i = 0; i < timeout_us; ++i) {
1791 udelay(1);
1792
1793 /* Check if our GPINT got acked */
1794 reg.bits.status = 0;
1795 test = (union dmub_gpint_data_register)
1796 cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
1797
1798 if (test.all == reg.all)
1799 return DMUB_STATUS_OK;
1800 }
1801
1802 return DMUB_STATUS_TIMEOUT;
1803 }
1804
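/*
 * Retrieve the SoC bounding box from the VBIOS copy of DMUB. A GART buffer
 * is allocated, its 64-bit GPU address is handed to DMUB as four 16-bit
 * GPINT words, and DMUB is then asked to copy the bounding box into it.
 * Only DCN 4.0.1 uses this path; other ASICs return NULL.
 */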
1805 static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
1806 {
1807 void *bb;
1808 long long addr;
1809 unsigned int bb_size;
1810 int i = 0;
1811 uint16_t chunk;
1812 enum dmub_gpint_command send_addrs[] = {
1813 DMUB_GPINT__SET_BB_ADDR_WORD0,
1814 DMUB_GPINT__SET_BB_ADDR_WORD1,
1815 DMUB_GPINT__SET_BB_ADDR_WORD2,
1816 DMUB_GPINT__SET_BB_ADDR_WORD3,
1817 };
1818 enum dmub_status ret;
1819
1820 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1821 case IP_VERSION(4, 0, 1):
1822 bb_size = sizeof(struct dml2_soc_bb);
1823 break;
1824 default:
1825 return NULL;
1826 }
1827
1828 bb = dm_allocate_gpu_mem(adev,
1829 DC_MEM_ALLOC_TYPE_GART,
1830 bb_size,
1831 &addr);
1832 if (!bb)
1833 return NULL;
1834
1835 for (i = 0; i < 4; i++) {
1836 /* Extract 16-bit chunk */
1837 chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
1838 /* Send the chunk */
1839 ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
1840 if (ret != DMUB_STATUS_OK)
1841 goto free_bb;
1842 }
1843
1844 /* Now ask DMUB to copy the bb */
1845 ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
1846 if (ret != DMUB_STATUS_OK)
1847 goto free_bb;
1848
1849 return bb;
1850
1851 free_bb:
1852 dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
1853 return NULL;
1854
1855 }
1856
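/*
 * Pick the default IPS (idle power state) policy for the ASIC: DCN 3.5.x
 * and 3.6 default to RCG while active and IPS2 while off, ASICs older than
 * DCN 3.5 have no IPS support and disable it entirely, and newer ASICs keep
 * IPS fully enabled.
 */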
1857 static enum dmub_ips_disable_type dm_get_default_ips_mode(
1858 struct amdgpu_device *adev)
1859 {
1860 enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
1861
1862 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1863 case IP_VERSION(3, 5, 0):
1864 case IP_VERSION(3, 6, 0):
1865 case IP_VERSION(3, 5, 1):
1866 ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
1867 break;
1868 default:
1869 /* ASICs older than DCN35 do not support IPS */
1870 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
1871 ret = DMUB_IPS_DISABLE_ALL;
1872 break;
1873 }
1874
1875 return ret;
1876 }
1877
1878 static int amdgpu_dm_init(struct amdgpu_device *adev)
1879 {
1880 struct dc_init_data init_data;
1881 struct dc_callback_init init_params;
1882 int r;
1883
1884 adev->dm.ddev = adev_to_drm(adev);
1885 adev->dm.adev = adev;
1886
1887 /* Zero all the fields */
1888 memset(&init_data, 0, sizeof(init_data));
1889 memset(&init_params, 0, sizeof(init_params));
1890
1891 mutex_init(&adev->dm.dpia_aux_lock);
1892 mutex_init(&adev->dm.dc_lock);
1893 mutex_init(&adev->dm.audio_lock);
1894
1895 if (amdgpu_dm_irq_init(adev)) {
1896 drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
1897 goto error;
1898 }
1899
1900 init_data.asic_id.chip_family = adev->family;
1901
1902 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1903 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1904 init_data.asic_id.chip_id = adev->pdev->device;
1905
1906 init_data.asic_id.vram_width = adev->gmc.vram_width;
1907 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1908 init_data.asic_id.atombios_base_address =
1909 adev->mode_info.atom_context->bios;
1910
1911 init_data.driver = adev;
1912
1913 /* cgs_device was created in dm_sw_init() */
1914 init_data.cgs_device = adev->dm.cgs_device;
1915
1916 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1917
1918 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1919 case IP_VERSION(2, 1, 0):
1920 switch (adev->dm.dmcub_fw_version) {
1921 case 0: /* development */
1922 case 0x1: /* linux-firmware.git hash 6d9f399 */
1923 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1924 init_data.flags.disable_dmcu = false;
1925 break;
1926 default:
1927 init_data.flags.disable_dmcu = true;
1928 }
1929 break;
1930 case IP_VERSION(2, 0, 3):
1931 init_data.flags.disable_dmcu = true;
1932 break;
1933 default:
1934 break;
1935 }
1936
1937 /* APUs support S/G display by default, except for:
1938 * ASICs before Carrizo,
1939 * RAVEN1 (users reported stability issues)
1940 */
1941
1942 if (adev->asic_type < CHIP_CARRIZO) {
1943 init_data.flags.gpu_vm_support = false;
1944 } else if (adev->asic_type == CHIP_RAVEN) {
1945 if (adev->apu_flags & AMD_APU_IS_RAVEN)
1946 init_data.flags.gpu_vm_support = false;
1947 else
1948 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
1949 } else {
1950 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
1951 init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
1952 else
1953 init_data.flags.gpu_vm_support =
1954 (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
1955 }
1956
1957 adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
1958
1959 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1960 init_data.flags.fbc_support = true;
1961
1962 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1963 init_data.flags.multi_mon_pp_mclk_switch = true;
1964
1965 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1966 init_data.flags.disable_fractional_pwm = true;
1967
1968 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1969 init_data.flags.edp_no_power_sequencing = true;
1970
1971 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1972 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1973 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1974 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1975
1976 init_data.flags.seamless_boot_edp_requested = false;
1977
1978 if (amdgpu_device_seamless_boot_supported(adev)) {
1979 init_data.flags.seamless_boot_edp_requested = true;
1980 init_data.flags.allow_seamless_boot_optimization = true;
1981 drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
1982 }
1983
1984 init_data.flags.enable_mipi_converter_optimization = true;
1985
1986 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1987 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1988 init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
1989
1990 if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
1991 init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
1992 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
1993 init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
1994 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
1995 init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
1996 else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
1997 init_data.flags.disable_ips = DMUB_IPS_ENABLE;
1998 else
1999 init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
2000
2001 init_data.flags.disable_ips_in_vpb = 0;
2002
2003 /* DCN35 and above supports dynamic DTBCLK switch */
2004 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
2005 init_data.flags.allow_0_dtb_clk = true;
2006
2007 /* Enable DWB for tested platforms only */
2008 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
2009 init_data.num_virtual_links = 1;
2010
2011 retrieve_dmi_info(&adev->dm);
2012 if (adev->dm.edp0_on_dp1_quirk)
2013 init_data.flags.support_edp0_on_dp1 = true;
2014
2015 if (adev->dm.bb_from_dmub)
2016 init_data.bb_from_dmub = adev->dm.bb_from_dmub;
2017 else
2018 init_data.bb_from_dmub = NULL;
2019
2020 /* Display Core create. */
2021 adev->dm.dc = dc_create(&init_data);
2022
2023 if (adev->dm.dc) {
2024 drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
2025 dce_version_to_string(adev->dm.dc->ctx->dce_version));
2026 } else {
2027 drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
2028 goto error;
2029 }
2030
2031 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
2032 adev->dm.dc->debug.force_single_disp_pipe_split = false;
2033 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
2034 }
2035
2036 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2037 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2038 if (dm_should_disable_stutter(adev->pdev))
2039 adev->dm.dc->debug.disable_stutter = true;
2040
2041 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
2042 adev->dm.dc->debug.disable_stutter = true;
2043
2044 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
2045 adev->dm.dc->debug.disable_dsc = true;
2046
2047 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
2048 adev->dm.dc->debug.disable_clock_gate = true;
2049
2050 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
2051 adev->dm.dc->debug.force_subvp_mclk_switch = true;
2052
2053 if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
2054 adev->dm.dc->debug.force_disable_subvp = true;
2055 adev->dm.dc->debug.fams2_config.bits.enable = false;
2056 }
2057
2058 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
2059 adev->dm.dc->debug.using_dml2 = true;
2060 adev->dm.dc->debug.using_dml21 = true;
2061 }
2062
2063 if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
2064 adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
2065
2066 if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
2067 adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
2068
2069 if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
2070 adev->dm.dc->debug.skip_detection_link_training = true;
2071
2072 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
2073
2074 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
2075 adev->dm.dc->debug.ignore_cable_id = true;
2076
2077 if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
2078 drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
2079
2080 r = dm_dmub_hw_init(adev);
2081 if (r) {
2082 drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
2083 goto error;
2084 }
2085
2086 dc_hardware_init(adev->dm.dc);
2087
2088 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
2089 if (!adev->dm.hpd_rx_offload_wq) {
2090 drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
2091 goto error;
2092 }
2093
2094 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
2095 struct dc_phy_addr_space_config pa_config;
2096
2097 mmhub_read_system_context(adev, &pa_config);
2098
2099 // Call the DC init_memory func
2100 dc_setup_system_context(adev->dm.dc, &pa_config);
2101 }
2102
2103 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
2104 if (!adev->dm.freesync_module) {
2105 drm_err(adev_to_drm(adev),
2106 "failed to initialize freesync_module.\n");
2107 } else
2108 drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
2109 adev->dm.freesync_module);
2110
2111 amdgpu_dm_init_color_mod();
2112
2113 if (adev->dm.dc->caps.max_links > 0) {
2114 adev->dm.vblank_control_workqueue =
2115 create_singlethread_workqueue("dm_vblank_control_workqueue");
2116 if (!adev->dm.vblank_control_workqueue)
2117 drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
2118 }
2119
2120 if (adev->dm.dc->caps.ips_support &&
2121 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
2122 adev->dm.idle_workqueue = idle_create_workqueue(adev);
2123
2124 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
2125 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
2126
2127 if (!adev->dm.hdcp_workqueue)
2128 drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
2129 else
2130 drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
2131
2132 dc_init_callbacks(adev->dm.dc, &init_params);
2133 }
2134 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2135 init_completion(&adev->dm.dmub_aux_transfer_done);
2136 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
2137 if (!adev->dm.dmub_notify) {
2138 drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
2139 goto error;
2140 }
2141
2142 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
2143 if (!adev->dm.delayed_hpd_wq) {
2144 drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
2145 goto error;
2146 }
2147
2148 amdgpu_dm_outbox_init(adev);
2149 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
2150 dmub_aux_setconfig_callback, false)) {
2151 drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
2152 goto error;
2153 }
2154
2155 for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
2156 init_completion(&adev->dm.fused_io[i].replied);
2157
2158 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
2159 dmub_aux_fused_io_callback, false)) {
2160 drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
2161 goto error;
2162 }
2163 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
2164 * It is expected that DMUB will resend any pending notifications at this point. Note
2165 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
2166 * align with the legacy interface initialization sequence. Connection status will be
2167 * proactively detected once in amdgpu_dm_initialize_drm_device.
2168 */
2169 dc_enable_dmub_outbox(adev->dm.dc);
2170
2171 /* DPIA trace goes to dmesg logs only if outbox is enabled */
2172 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
2173 dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
2174 }
2175
2176 if (amdgpu_dm_initialize_drm_device(adev)) {
2177 drm_err(adev_to_drm(adev),
2178 "failed to initialize sw for display support.\n");
2179 goto error;
2180 }
2181
2182 /* create fake encoders for MST */
2183 dm_dp_create_fake_mst_encoders(adev);
2184
2185 /* TODO: Add_display_info? */
2186
2187 /* TODO use dynamic cursor width */
2188 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
2189 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
2190
2191 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
2192 drm_err(adev_to_drm(adev),
2193 "failed to initialize vblank for display support.\n");
2194 goto error;
2195 }
2196
2197 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2198 amdgpu_dm_crtc_secure_display_create_contexts(adev);
2199 if (!adev->dm.secure_display_ctx.crtc_ctx)
2200 drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
2201
2202 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
2203 adev->dm.secure_display_ctx.support_mul_roi = true;
2204
2205 #endif
2206
2207 drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
2208
2209 return 0;
2210 error:
2211 amdgpu_dm_fini(adev);
2212
2213 return -EINVAL;
2214 }
2215
2216 static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
2217 {
2218 struct amdgpu_device *adev = ip_block->adev;
2219
2220 amdgpu_dm_audio_fini(adev);
2221
2222 return 0;
2223 }
2224
2225 static void amdgpu_dm_fini(struct amdgpu_device *adev)
2226 {
2227 int i;
2228
2229 if (adev->dm.vblank_control_workqueue) {
2230 destroy_workqueue(adev->dm.vblank_control_workqueue);
2231 adev->dm.vblank_control_workqueue = NULL;
2232 }
2233
2234 if (adev->dm.idle_workqueue) {
2235 if (adev->dm.idle_workqueue->running) {
2236 adev->dm.idle_workqueue->enable = false;
2237 flush_work(&adev->dm.idle_workqueue->work);
2238 }
2239
2240 kfree(adev->dm.idle_workqueue);
2241 adev->dm.idle_workqueue = NULL;
2242 }
2243
2244 amdgpu_dm_destroy_drm_device(&adev->dm);
2245
2246 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2247 if (adev->dm.secure_display_ctx.crtc_ctx) {
2248 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2249 if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
2250 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
2251 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
2252 }
2253 }
2254 kfree(adev->dm.secure_display_ctx.crtc_ctx);
2255 adev->dm.secure_display_ctx.crtc_ctx = NULL;
2256 }
2257 #endif
2258 if (adev->dm.hdcp_workqueue) {
2259 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
2260 adev->dm.hdcp_workqueue = NULL;
2261 }
2262
2263 if (adev->dm.dc) {
2264 dc_deinit_callbacks(adev->dm.dc);
2265 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
2266 if (dc_enable_dmub_notifications(adev->dm.dc)) {
2267 kfree(adev->dm.dmub_notify);
2268 adev->dm.dmub_notify = NULL;
2269 destroy_workqueue(adev->dm.delayed_hpd_wq);
2270 adev->dm.delayed_hpd_wq = NULL;
2271 }
2272 }
2273
2274 if (adev->dm.dmub_bo)
2275 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
2276 &adev->dm.dmub_bo_gpu_addr,
2277 &adev->dm.dmub_bo_cpu_addr);
2278
2279 if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
2280 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
2281 if (adev->dm.hpd_rx_offload_wq[i].wq) {
2282 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
2283 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
2284 }
2285 }
2286
2287 kfree(adev->dm.hpd_rx_offload_wq);
2288 adev->dm.hpd_rx_offload_wq = NULL;
2289 }
2290
2291 /* DC Destroy TODO: Replace destroy DAL */
2292 if (adev->dm.dc)
2293 dc_destroy(&adev->dm.dc);
2294 /*
2295 * TODO: pageflip, vblank interrupt
2296 *
2297 * amdgpu_dm_irq_fini(adev);
2298 */
2299
2300 if (adev->dm.cgs_device) {
2301 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
2302 adev->dm.cgs_device = NULL;
2303 }
2304 if (adev->dm.freesync_module) {
2305 mod_freesync_destroy(adev->dm.freesync_module);
2306 adev->dm.freesync_module = NULL;
2307 }
2308
2309 mutex_destroy(&adev->dm.audio_lock);
2310 mutex_destroy(&adev->dm.dc_lock);
2311 mutex_destroy(&adev->dm.dpia_aux_lock);
2312 }
2313
2314 static int load_dmcu_fw(struct amdgpu_device *adev)
2315 {
2316 const char *fw_name_dmcu = NULL;
2317 int r;
2318 const struct dmcu_firmware_header_v1_0 *hdr;
2319
2320 switch (adev->asic_type) {
2321 #if defined(CONFIG_DRM_AMD_DC_SI)
2322 case CHIP_TAHITI:
2323 case CHIP_PITCAIRN:
2324 case CHIP_VERDE:
2325 case CHIP_OLAND:
2326 #endif
2327 case CHIP_BONAIRE:
2328 case CHIP_HAWAII:
2329 case CHIP_KAVERI:
2330 case CHIP_KABINI:
2331 case CHIP_MULLINS:
2332 case CHIP_TONGA:
2333 case CHIP_FIJI:
2334 case CHIP_CARRIZO:
2335 case CHIP_STONEY:
2336 case CHIP_POLARIS11:
2337 case CHIP_POLARIS10:
2338 case CHIP_POLARIS12:
2339 case CHIP_VEGAM:
2340 case CHIP_VEGA10:
2341 case CHIP_VEGA12:
2342 case CHIP_VEGA20:
2343 return 0;
2344 case CHIP_NAVI12:
2345 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
2346 break;
2347 case CHIP_RAVEN:
2348 if (ASICREV_IS_PICASSO(adev->external_rev_id))
2349 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2350 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
2351 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2352 else
2353 return 0;
2354 break;
2355 default:
2356 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2357 case IP_VERSION(2, 0, 2):
2358 case IP_VERSION(2, 0, 3):
2359 case IP_VERSION(2, 0, 0):
2360 case IP_VERSION(2, 1, 0):
2361 case IP_VERSION(3, 0, 0):
2362 case IP_VERSION(3, 0, 2):
2363 case IP_VERSION(3, 0, 3):
2364 case IP_VERSION(3, 0, 1):
2365 case IP_VERSION(3, 1, 2):
2366 case IP_VERSION(3, 1, 3):
2367 case IP_VERSION(3, 1, 4):
2368 case IP_VERSION(3, 1, 5):
2369 case IP_VERSION(3, 1, 6):
2370 case IP_VERSION(3, 2, 0):
2371 case IP_VERSION(3, 2, 1):
2372 case IP_VERSION(3, 5, 0):
2373 case IP_VERSION(3, 5, 1):
2374 case IP_VERSION(3, 6, 0):
2375 case IP_VERSION(4, 0, 1):
2376 return 0;
2377 default:
2378 break;
2379 }
2380 drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
2381 return -EINVAL;
2382 }
2383
2384 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2385 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
2386 return 0;
2387 }
2388
2389 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
2390 "%s", fw_name_dmcu);
2391 if (r == -ENODEV) {
2392 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2393 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2394 adev->dm.fw_dmcu = NULL;
2395 return 0;
2396 }
2397 if (r) {
2398 drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
2399 fw_name_dmcu);
2400 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2401 return r;
2402 }
2403
2404 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2405 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2406 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2407 adev->firmware.fw_size +=
2408 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2409
2410 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2411 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2412 adev->firmware.fw_size +=
2413 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2414
2415 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2416
2417 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2418
2419 return 0;
2420 }
2421
2422 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2423 {
2424 struct amdgpu_device *adev = ctx;
2425
2426 return dm_read_reg(adev->dm.dc->ctx, address);
2427 }
2428
2429 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2430 uint32_t value)
2431 {
2432 struct amdgpu_device *adev = ctx;
2433
2434 return dm_write_reg(adev->dm.dc->ctx, address, value);
2435 }
2436
2437 static int dm_dmub_sw_init(struct amdgpu_device *adev)
2438 {
2439 struct dmub_srv_create_params create_params;
2440 struct dmub_srv_region_params region_params;
2441 struct dmub_srv_region_info region_info;
2442 struct dmub_srv_memory_params memory_params;
2443 struct dmub_srv_fb_info *fb_info;
2444 struct dmub_srv *dmub_srv;
2445 const struct dmcub_firmware_header_v1_0 *hdr;
2446 enum dmub_asic dmub_asic;
2447 enum dmub_status status;
2448 static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
2449 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
2450 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
2451 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
2452 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
2453 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
2454 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
2455 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
2456 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
2457 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
2458 DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
2459 };
2460 int r;
2461
2462 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2463 case IP_VERSION(2, 1, 0):
2464 dmub_asic = DMUB_ASIC_DCN21;
2465 break;
2466 case IP_VERSION(3, 0, 0):
2467 dmub_asic = DMUB_ASIC_DCN30;
2468 break;
2469 case IP_VERSION(3, 0, 1):
2470 dmub_asic = DMUB_ASIC_DCN301;
2471 break;
2472 case IP_VERSION(3, 0, 2):
2473 dmub_asic = DMUB_ASIC_DCN302;
2474 break;
2475 case IP_VERSION(3, 0, 3):
2476 dmub_asic = DMUB_ASIC_DCN303;
2477 break;
2478 case IP_VERSION(3, 1, 2):
2479 case IP_VERSION(3, 1, 3):
2480 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2481 break;
2482 case IP_VERSION(3, 1, 4):
2483 dmub_asic = DMUB_ASIC_DCN314;
2484 break;
2485 case IP_VERSION(3, 1, 5):
2486 dmub_asic = DMUB_ASIC_DCN315;
2487 break;
2488 case IP_VERSION(3, 1, 6):
2489 dmub_asic = DMUB_ASIC_DCN316;
2490 break;
2491 case IP_VERSION(3, 2, 0):
2492 dmub_asic = DMUB_ASIC_DCN32;
2493 break;
2494 case IP_VERSION(3, 2, 1):
2495 dmub_asic = DMUB_ASIC_DCN321;
2496 break;
2497 case IP_VERSION(3, 5, 0):
2498 case IP_VERSION(3, 5, 1):
2499 dmub_asic = DMUB_ASIC_DCN35;
2500 break;
2501 case IP_VERSION(3, 6, 0):
2502 dmub_asic = DMUB_ASIC_DCN36;
2503 break;
2504 case IP_VERSION(4, 0, 1):
2505 dmub_asic = DMUB_ASIC_DCN401;
2506 break;
2507
2508 default:
2509 /* ASIC doesn't support DMUB. */
2510 return 0;
2511 }
2512
2513 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2514 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2515
2516 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2517 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2518 AMDGPU_UCODE_ID_DMCUB;
2519 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2520 adev->dm.dmub_fw;
2521 adev->firmware.fw_size +=
2522 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2523
2524 drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
2525 adev->dm.dmcub_fw_version);
2526 }
2527
2528
2529 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2530 dmub_srv = adev->dm.dmub_srv;
2531
2532 if (!dmub_srv) {
2533 drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
2534 return -ENOMEM;
2535 }
2536
2537 memset(&create_params, 0, sizeof(create_params));
2538 create_params.user_ctx = adev;
2539 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2540 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2541 create_params.asic = dmub_asic;
2542
2543 /* Create the DMUB service. */
2544 status = dmub_srv_create(dmub_srv, &create_params);
2545 if (status != DMUB_STATUS_OK) {
2546 drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
2547 return -EINVAL;
2548 }
2549
2550 /* Calculate the size of all the regions for the DMUB service. */
2551 memset(&region_params, 0, sizeof(region_params));
2552
2553 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2554 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2555 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2556 region_params.vbios_size = adev->bios_size;
2557 region_params.fw_bss_data = region_params.bss_data_size ?
2558 adev->dm.dmub_fw->data +
2559 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2560 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2561 region_params.fw_inst_const =
2562 adev->dm.dmub_fw->data +
2563 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2564 PSP_HEADER_BYTES;
2565 region_params.window_memory_type = window_memory_type;
2566
2567 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2568 &region_info);
2569
2570 if (status != DMUB_STATUS_OK) {
2571 drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
2572 return -EINVAL;
2573 }
2574
2575 /*
2576 * Allocate a framebuffer based on the total size of all the regions.
2577 * TODO: Move this into GART.
2578 */
2579 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2580 AMDGPU_GEM_DOMAIN_VRAM |
2581 AMDGPU_GEM_DOMAIN_GTT,
2582 &adev->dm.dmub_bo,
2583 &adev->dm.dmub_bo_gpu_addr,
2584 &adev->dm.dmub_bo_cpu_addr);
2585 if (r)
2586 return r;
2587
2588 /* Rebase the regions on the framebuffer address. */
2589 memset(&memory_params, 0, sizeof(memory_params));
2590 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2591 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2592 memory_params.region_info = &region_info;
2593 memory_params.window_memory_type = window_memory_type;
2594
2595 adev->dm.dmub_fb_info =
2596 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2597 fb_info = adev->dm.dmub_fb_info;
2598
2599 if (!fb_info) {
2600 drm_err(adev_to_drm(adev),
2601 "Failed to allocate framebuffer info for DMUB service!\n");
2602 return -ENOMEM;
2603 }
2604
2605 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
2606 if (status != DMUB_STATUS_OK) {
2607 drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
2608 return -EINVAL;
2609 }
2610
2611 adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
2612
2613 return 0;
2614 }
2615
2616 static int dm_sw_init(struct amdgpu_ip_block *ip_block)
2617 {
2618 struct amdgpu_device *adev = ip_block->adev;
2619 int r;
2620
2621 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
2622
2623 if (!adev->dm.cgs_device) {
2624 drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
2625 return -EINVAL;
2626 }
2627
2628 /* Moved from dm init since we need to use allocations for storing bounding box data */
2629 INIT_LIST_HEAD(&adev->dm.da_list);
2630
2631 r = dm_dmub_sw_init(adev);
2632 if (r)
2633 return r;
2634
2635 return load_dmcu_fw(adev);
2636 }
2637
2638 static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
2639 {
2640 struct amdgpu_device *adev = ip_block->adev;
2641 struct dal_allocation *da;
2642
2643 list_for_each_entry(da, &adev->dm.da_list, list) {
2644 if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
2645 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
2646 list_del(&da->list);
2647 kfree(da);
2648 adev->dm.bb_from_dmub = NULL;
2649 break;
2650 }
2651 }
2652
2653
2654 kfree(adev->dm.dmub_fb_info);
2655 adev->dm.dmub_fb_info = NULL;
2656
2657 if (adev->dm.dmub_srv) {
2658 dmub_srv_destroy(adev->dm.dmub_srv);
2659 kfree(adev->dm.dmub_srv);
2660 adev->dm.dmub_srv = NULL;
2661 }
2662
2663 amdgpu_ucode_release(&adev->dm.dmub_fw);
2664 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2665
2666 return 0;
2667 }
2668
2669 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2670 {
2671 struct amdgpu_dm_connector *aconnector;
2672 struct drm_connector *connector;
2673 struct drm_connector_list_iter iter;
2674 int ret = 0;
2675
2676 drm_connector_list_iter_begin(dev, &iter);
2677 drm_for_each_connector_iter(connector, &iter) {
2678
2679 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2680 continue;
2681
2682 aconnector = to_amdgpu_dm_connector(connector);
2683 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2684 aconnector->mst_mgr.aux) {
2685 drm_dbg_kms(dev, "DM_MST: starting TM on aconnector: %p [id: %d]\n",
2686 aconnector,
2687 aconnector->base.base.id);
2688
2689 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2690 if (ret < 0) {
2691 drm_err(dev, "DM_MST: Failed to start MST\n");
2692 aconnector->dc_link->type =
2693 dc_connection_single;
2694 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2695 aconnector->dc_link);
2696 break;
2697 }
2698 }
2699 }
2700 drm_connector_list_iter_end(&iter);
2701
2702 return ret;
2703 }
2704
2705 static int dm_late_init(struct amdgpu_ip_block *ip_block)
2706 {
2707 struct amdgpu_device *adev = ip_block->adev;
2708
2709 struct dmcu_iram_parameters params;
2710 unsigned int linear_lut[16];
2711 int i;
2712 struct dmcu *dmcu = NULL;
2713
2714 dmcu = adev->dm.dc->res_pool->dmcu;
2715
2716 for (i = 0; i < 16; i++)
2717 linear_lut[i] = 0xFFFF * i / 15;
2718
2719 params.set = 0;
2720 params.backlight_ramping_override = false;
2721 params.backlight_ramping_start = 0xCCCC;
2722 params.backlight_ramping_reduction = 0xCCCCCCCC;
2723 params.backlight_lut_array_size = 16;
2724 params.backlight_lut_array = linear_lut;
2725
2726 /* Minimum backlight level after ABM reduction. Don't allow it below 1%:
2727 * 0xFFFF x 0.01 = 0x28F
2728 */
2729 params.min_abm_backlight = 0x28F;
2730 /* In the case where ABM is implemented on DMCUB,
2731 * the dmcu object will be NULL.
2732 * ABM 2.4 and up are implemented on DMCUB.
2733 */
2734 if (dmcu) {
2735 if (!dmcu_load_iram(dmcu, params))
2736 return -EINVAL;
2737 } else if (adev->dm.dc->ctx->dmub_srv) {
2738 struct dc_link *edp_links[MAX_NUM_EDP];
2739 int edp_num;
2740
2741 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2742 for (i = 0; i < edp_num; i++) {
2743 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2744 return -EINVAL;
2745 }
2746 }
2747
2748 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2749 }
2750
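/*
 * Restore the state of an MST branch device after suspend: re-read the DPCD
 * caps, re-enable MST and UP request generation on the primary branch, and
 * re-program the GUID (generating a fresh one if the hub lost it across
 * suspend).
 */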
2751 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2752 {
2753 u8 buf[UUID_SIZE];
2754 guid_t guid;
2755 int ret;
2756
2757 mutex_lock(&mgr->lock);
2758 if (!mgr->mst_primary)
2759 goto out_fail;
2760
2761 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2762 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2763 goto out_fail;
2764 }
2765
2766 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2767 DP_MST_EN |
2768 DP_UP_REQ_EN |
2769 DP_UPSTREAM_IS_SRC);
2770 if (ret < 0) {
2771 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2772 goto out_fail;
2773 }
2774
2775 /* Some hubs forget their guids after they resume */
2776 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
2777 if (ret != sizeof(buf)) {
2778 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2779 goto out_fail;
2780 }
2781
2782 import_guid(&guid, buf);
2783
2784 if (guid_is_null(&guid)) {
2785 guid_gen(&guid);
2786 export_guid(buf, &guid);
2787
2788 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));
2789
2790 if (ret != sizeof(buf)) {
2791 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2792 goto out_fail;
2793 }
2794 }
2795
2796 guid_copy(&mgr->mst_primary->guid, &guid);
2797
2798 out_fail:
2799 mutex_unlock(&mgr->lock);
2800 }
2801
2802 void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
2803 {
2804 struct cec_notifier *n = aconnector->notifier;
2805
2806 if (!n)
2807 return;
2808
2809 cec_notifier_phys_addr_invalidate(n);
2810 }
2811
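/*
 * Forward the source physical address parsed from the sink's EDID to the
 * CEC notifier, if one has been registered for this connector.
 */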
2812 void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
2813 {
2814 struct drm_connector *connector = &aconnector->base;
2815 struct cec_notifier *n = aconnector->notifier;
2816
2817 if (!n)
2818 return;
2819
2820 cec_notifier_set_phys_addr(n,
2821 connector->display_info.source_physical_address);
2822 }
2823
2824 static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
2825 {
2826 struct amdgpu_dm_connector *aconnector;
2827 struct drm_connector *connector;
2828 struct drm_connector_list_iter conn_iter;
2829
2830 drm_connector_list_iter_begin(ddev, &conn_iter);
2831 drm_for_each_connector_iter(connector, &conn_iter) {
2832 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2833 continue;
2834
2835 aconnector = to_amdgpu_dm_connector(connector);
2836 if (suspend)
2837 hdmi_cec_unset_edid(aconnector);
2838 else
2839 hdmi_cec_set_edid(aconnector);
2840 }
2841 drm_connector_list_iter_end(&conn_iter);
2842 }
2843
2844 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2845 {
2846 struct amdgpu_dm_connector *aconnector;
2847 struct drm_connector *connector;
2848 struct drm_connector_list_iter iter;
2849 struct drm_dp_mst_topology_mgr *mgr;
2850
2851 drm_connector_list_iter_begin(dev, &iter);
2852 drm_for_each_connector_iter(connector, &iter) {
2853
2854 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2855 continue;
2856
2857 aconnector = to_amdgpu_dm_connector(connector);
2858 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2859 aconnector->mst_root)
2860 continue;
2861
2862 mgr = &aconnector->mst_mgr;
2863
2864 if (suspend) {
2865 drm_dp_mst_topology_mgr_suspend(mgr);
2866 } else {
2867 /* If the extended timeout is supported in hardware, default to the
2868 * LTTPR timeout (3.2 ms) first as a W/A for the DP link layer
2869 * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
2870 */
2871 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2872 if (!dp_is_lttpr_present(aconnector->dc_link))
2873 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2874
2875 /* TODO: move resume_mst_branch_status() back into DRM MST resume once
2876 * the topology probing work is pulled out of MST resume into a second
2877 * MST resume step. That second step should be called after the old
2878 * state has been restored (i.e. drm_atomic_helper_resume()).
2879 */
2880 resume_mst_branch_status(mgr);
2881 }
2882 }
2883 drm_connector_list_iter_end(&iter);
2884 }
2885
2886 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2887 {
2888 int ret = 0;
2889
2890 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2891 * on the Windows driver dc implementation.
2892 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
2893 * should be passed to smu during boot up and on resume from s3.
2894 * boot up: dc calculates the dcn watermark clock settings within dc_create,
2895 * dcn20_resource_construct
2896 * then calls the pplib functions below to pass the settings to smu:
2897 * smu_set_watermarks_for_clock_ranges
2898 * smu_set_watermarks_table
2899 * navi10_set_watermarks_table
2900 * smu_write_watermarks_table
2901 *
2902 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
2903 * dc has implemented a different flow for the Windows driver:
2904 * dc_hardware_init / dc_set_power_state
2905 * dcn10_init_hw
2906 * notify_wm_ranges
2907 * set_wm_ranges
2908 * -- Linux
2909 * smu_set_watermarks_for_clock_ranges
2910 * renoir_set_watermarks_table
2911 * smu_write_watermarks_table
2912 *
2913 * For Linux,
2914 * dc_hardware_init -> amdgpu_dm_init
2915 * dc_set_power_state --> dm_resume
2916 *
2917 * therefore, this function applies to navi10/12/14 but not Renoir
2918 *
2919 */
2920 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2921 case IP_VERSION(2, 0, 2):
2922 case IP_VERSION(2, 0, 0):
2923 break;
2924 default:
2925 return 0;
2926 }
2927
2928 ret = amdgpu_dpm_write_watermarks_table(adev);
2929 if (ret) {
2930 drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
2931 return ret;
2932 }
2933
2934 return 0;
2935 }
2936
2937 static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
2938 {
2939 struct amdgpu_display_manager *dm = &adev->dm;
2940 struct amdgpu_i2c_adapter *oem_i2c;
2941 struct ddc_service *oem_ddc_service;
2942 int r;
2943
2944 oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
2945 if (oem_ddc_service) {
2946 oem_i2c = create_i2c(oem_ddc_service, true);
2947 if (!oem_i2c) {
2948 drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
2949 return -ENOMEM;
2950 }
2951
2952 r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
2953 if (r) {
2954 drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
2955 kfree(oem_i2c);
2956 return r;
2957 }
2958 dm->oem_i2c = oem_i2c;
2959 }
2960
2961 return 0;
2962 }
2963
2964 /**
2965 * dm_hw_init() - Initialize DC device
2966 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
2967 *
2968 * Initialize the &struct amdgpu_display_manager device. This involves calling
2969 * the initializers of each DM component, then populating the struct with them.
2970 *
2971 * Although the function implies hardware initialization, both hardware and
2972 * software are initialized here. Splitting them out to their relevant init
2973 * hooks is a future TODO item.
2974 *
2975 * Some notable things that are initialized here:
2976 *
2977 * - Display Core, both software and hardware
2978 * - DC modules that we need (freesync and color management)
2979 * - DRM software states
2980 * - Interrupt sources and handlers
2981 * - Vblank support
2982 * - Debug FS entries, if enabled
2983 */
2984 static int dm_hw_init(struct amdgpu_ip_block *ip_block)
2985 {
2986 struct amdgpu_device *adev = ip_block->adev;
2987 int r;
2988
2989 /* Create DAL display manager */
2990 r = amdgpu_dm_init(adev);
2991 if (r)
2992 return r;
2993 amdgpu_dm_hpd_init(adev);
2994
2995 r = dm_oem_i2c_hw_init(adev);
2996 if (r)
2997 drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
2998
2999 return 0;
3000 }
3001
3002 /**
3003 * dm_hw_fini() - Teardown DC device
3004 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
3005 *
3006 * Teardown components within &struct amdgpu_display_manager that require
3007 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
3008 * were loaded. Also flush IRQ workqueues and disable them.
3009 */
3010 static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
3011 {
3012 struct amdgpu_device *adev = ip_block->adev;
3013
3014 amdgpu_dm_hpd_fini(adev);
3015
3016 amdgpu_dm_irq_fini(adev);
3017 amdgpu_dm_fini(adev);
3018 return 0;
3019 }
3020
3021
3022 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
3023 struct dc_state *state, bool enable)
3024 {
3025 enum dc_irq_source irq_source;
3026 struct amdgpu_crtc *acrtc;
3027 int rc = -EBUSY;
3028 int i = 0;
3029
3030 for (i = 0; i < state->stream_count; i++) {
3031 acrtc = get_crtc_by_otg_inst(
3032 adev, state->stream_status[i].primary_otg_inst);
3033
3034 if (acrtc && state->stream_status[i].plane_count != 0) {
3035 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
3036 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3037 if (rc)
3038 drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
3039 enable ? "enable" : "disable");
3040
3041 if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
3042 if (enable) {
3043 if (amdgpu_dm_crtc_vrr_active(
3044 to_dm_crtc_state(acrtc->base.state)))
3045 rc = amdgpu_dm_crtc_set_vupdate_irq(
3046 &acrtc->base, true);
3047 } else
3048 rc = amdgpu_dm_crtc_set_vupdate_irq(
3049 &acrtc->base, false);
3050
3051 if (rc)
3052 drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
3053 enable ? "en" : "dis");
3054 }
3055
3056 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3057 /* During gpu-reset we disable and then enable vblank irq, so
3058 * don't use amdgpu_irq_get/put() to avoid refcount change.
3059 */
3060 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
3061 drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
3062 }
3063 }
3064
3065 }
3066
3067 DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T))
3068
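/*
 * Commit an empty stream configuration: copy the current DC state, strip
 * all planes and streams from the copy, and commit it. Used in the GPU
 * reset path of dm_suspend() to quiesce the display hardware.
 */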
3069 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
3070 {
3071 struct dc_state *context __free(state_release) = NULL;
3072 int i;
3073 struct dc_stream_state *del_streams[MAX_PIPES];
3074 int del_streams_count = 0;
3075 struct dc_commit_streams_params params = {};
3076
3077 memset(del_streams, 0, sizeof(del_streams));
3078
3079 context = dc_state_create_current_copy(dc);
3080 if (context == NULL)
3081 return DC_ERROR_UNEXPECTED;
3082
3083 /* First remove from context all streams */
3084 for (i = 0; i < context->stream_count; i++) {
3085 struct dc_stream_state *stream = context->streams[i];
3086
3087 del_streams[del_streams_count++] = stream;
3088 }
3089
3090 /* Remove all planes for removed streams and then remove the streams */
3091 for (i = 0; i < del_streams_count; i++) {
3092 enum dc_status res;
3093
3094 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context))
3095 return DC_FAIL_DETACH_SURFACES;
3096
3097 res = dc_state_remove_stream(dc, context, del_streams[i]);
3098 if (res != DC_OK)
3099 return res;
3100 }
3101
3102 params.streams = context->streams;
3103 params.stream_count = context->stream_count;
3104
3105 return dc_commit_streams(dc, &params);
3106 }
3107
3108 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
3109 {
3110 int i;
3111
3112 if (dm->hpd_rx_offload_wq) {
3113 for (i = 0; i < dm->dc->caps.max_links; i++)
3114 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
3115 }
3116 }
3117
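/*
 * Snapshot the current atomic state with drm_atomic_helper_suspend() so
 * that it can be restored by dm_destroy_cached_state() on resume.
 */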
3118 static int dm_cache_state(struct amdgpu_device *adev)
3119 {
3120 int r;
3121
3122 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
3123 if (IS_ERR(adev->dm.cached_state)) {
3124 r = PTR_ERR(adev->dm.cached_state);
3125 adev->dm.cached_state = NULL;
3126 }
3127
3128 return adev->dm.cached_state ? 0 : r;
3129 }
3130
3131 static void dm_destroy_cached_state(struct amdgpu_device *adev)
3132 {
3133 struct amdgpu_display_manager *dm = &adev->dm;
3134 struct drm_device *ddev = adev_to_drm(adev);
3135 struct dm_plane_state *dm_new_plane_state;
3136 struct drm_plane_state *new_plane_state;
3137 struct dm_crtc_state *dm_new_crtc_state;
3138 struct drm_crtc_state *new_crtc_state;
3139 struct drm_plane *plane;
3140 struct drm_crtc *crtc;
3141 int i;
3142
3143 if (!dm->cached_state)
3144 return;
3145
3146 /* Force mode set in atomic commit */
3147 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
3148 new_crtc_state->active_changed = true;
3149 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
3150 reset_freesync_config_for_crtc(dm_new_crtc_state);
3151 }
3152
3153 /*
3154 * atomic_check is expected to create the dc states. We need to release
3155 * them here, since they were duplicated as part of the suspend
3156 * procedure.
3157 */
3158 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
3159 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
3160 if (dm_new_crtc_state->stream) {
3161 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
3162 dc_stream_release(dm_new_crtc_state->stream);
3163 dm_new_crtc_state->stream = NULL;
3164 }
3165 dm_new_crtc_state->base.color_mgmt_changed = true;
3166 }
3167
3168 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
3169 dm_new_plane_state = to_dm_plane_state(new_plane_state);
3170 if (dm_new_plane_state->dc_state) {
3171 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
3172 dc_plane_state_release(dm_new_plane_state->dc_state);
3173 dm_new_plane_state->dc_state = NULL;
3174 }
3175 }
3176
3177 drm_atomic_helper_resume(ddev, dm->cached_state);
3178
3179 dm->cached_state = NULL;
3180 }
3181
3182 static int dm_suspend(struct amdgpu_ip_block *ip_block)
3183 {
3184 struct amdgpu_device *adev = ip_block->adev;
3185 struct amdgpu_display_manager *dm = &adev->dm;
3186
3187 if (amdgpu_in_reset(adev)) {
3188 enum dc_status res;
3189
3190 mutex_lock(&dm->dc_lock);
3191
3192 dc_allow_idle_optimizations(adev->dm.dc, false);
3193
3194 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
3195
3196 if (dm->cached_dc_state)
3197 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
3198
3199 res = amdgpu_dm_commit_zero_streams(dm->dc);
3200 if (res != DC_OK) {
3201 drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res);
3202 return -EINVAL;
3203 }
3204
3205 amdgpu_dm_irq_suspend(adev);
3206
3207 hpd_rx_irq_work_suspend(dm);
3208
3209 return 0;
3210 }
3211
3212 if (!adev->dm.cached_state) {
3213 int r = dm_cache_state(adev);
3214
3215 if (r)
3216 return r;
3217 }
3218
3219 s3_handle_hdmi_cec(adev_to_drm(adev), true);
3220
3221 s3_handle_mst(adev_to_drm(adev), true);
3222
3223 amdgpu_dm_irq_suspend(adev);
3224
3225 hpd_rx_irq_work_suspend(dm);
3226
3227 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
3228
3229 if (dm->dc->caps.ips_support && adev->in_s0ix)
3230 dc_allow_idle_optimizations(dm->dc, true);
3231
3232 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
3233
3234 return 0;
3235 }
3236
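/**
 * amdgpu_dm_find_first_crtc_matching_connector() - Find the connector
 * driving a CRTC in an atomic state
 * @state: atomic state to search
 * @crtc: CRTC of interest
 *
 * Return: the first connector whose new state points at @crtc, or NULL if
 * none does.
 */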
3237 struct drm_connector *
3238 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
3239 struct drm_crtc *crtc)
3240 {
3241 u32 i;
3242 struct drm_connector_state *new_con_state;
3243 struct drm_connector *connector;
3244 struct drm_crtc *crtc_from_state;
3245
3246 for_each_new_connector_in_state(state, connector, new_con_state, i) {
3247 crtc_from_state = new_con_state->crtc;
3248
3249 if (crtc_from_state == crtc)
3250 return connector;
3251 }
3252
3253 return NULL;
3254 }
3255
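/*
 * Emulated link detection: without querying the hardware, mark the link as
 * dc_connection_none, create a dc_sink matching the connector signal type
 * (DP is reported as a virtual signal), install it as the local sink, and
 * attempt to read the local EDID for it.
 */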
3256 static void emulated_link_detect(struct dc_link *link)
3257 {
3258 struct dc_sink_init_data sink_init_data = { 0 };
3259 struct display_sink_capability sink_caps = { 0 };
3260 enum dc_edid_status edid_status;
3261 struct dc_context *dc_ctx = link->ctx;
3262 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
3263 struct dc_sink *sink = NULL;
3264 struct dc_sink *prev_sink = NULL;
3265
3266 link->type = dc_connection_none;
3267 prev_sink = link->local_sink;
3268
3269 if (prev_sink)
3270 dc_sink_release(prev_sink);
3271
3272 switch (link->connector_signal) {
3273 case SIGNAL_TYPE_HDMI_TYPE_A: {
3274 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
3275 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
3276 break;
3277 }
3278
3279 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
3280 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
3281 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
3282 break;
3283 }
3284
3285 case SIGNAL_TYPE_DVI_DUAL_LINK: {
3286 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
3287 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
3288 break;
3289 }
3290
3291 case SIGNAL_TYPE_LVDS: {
3292 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
3293 sink_caps.signal = SIGNAL_TYPE_LVDS;
3294 break;
3295 }
3296
3297 case SIGNAL_TYPE_EDP: {
3298 sink_caps.transaction_type =
3299 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
3300 sink_caps.signal = SIGNAL_TYPE_EDP;
3301 break;
3302 }
3303
3304 case SIGNAL_TYPE_DISPLAY_PORT: {
3305 sink_caps.transaction_type =
3306 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
3307 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
3308 break;
3309 }
3310
3311 default:
3312 drm_err(dev, "Invalid connector type! signal:%d\n",
3313 link->connector_signal);
3314 return;
3315 }
3316
3317 sink_init_data.link = link;
3318 sink_init_data.sink_signal = sink_caps.signal;
3319
3320 sink = dc_sink_create(&sink_init_data);
3321 if (!sink) {
3322 drm_err(dev, "Failed to create sink!\n");
3323 return;
3324 }
3325
3326 /* dc_sink_create returns a new reference */
3327 link->local_sink = sink;
3328
3329 edid_status = dm_helpers_read_local_edid(
3330 link->ctx,
3331 link,
3332 sink);
3333
3334 if (edid_status != EDID_OK)
3335 drm_err(dev, "Failed to read EDID\n");
3336
3337 }
3338
3339 static void dm_gpureset_commit_state(struct dc_state *dc_state,
3340 struct amdgpu_display_manager *dm)
3341 {
3342 struct {
3343 struct dc_surface_update surface_updates[MAX_SURFACES];
3344 struct dc_plane_info plane_infos[MAX_SURFACES];
3345 struct dc_scaling_info scaling_infos[MAX_SURFACES];
3346 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
3347 struct dc_stream_update stream_update;
3348 } *bundle __free(kfree);
3349 int k, m;
3350
3351 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
3352
3353 if (!bundle) {
3354 drm_err(dm->ddev, "Failed to allocate update bundle\n");
3355 return;
3356 }
3357
3358 for (k = 0; k < dc_state->stream_count; k++) {
3359 bundle->stream_update.stream = dc_state->streams[k];
3360
3361 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
3362 bundle->surface_updates[m].surface =
3363 dc_state->stream_status[k].plane_states[m];
3364 bundle->surface_updates[m].surface->force_full_update =
3365 true;
3366 }
3367
3368 update_planes_and_stream_adapter(dm->dc,
3369 UPDATE_TYPE_FULL,
3370 dc_state->stream_status[k].plane_count,
3371 dc_state->streams[k],
3372 &bundle->stream_update,
3373 bundle->surface_updates);
3374 }
3375 }
3376
3377 static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
3378 struct dc_sink *sink)
3379 {
3380 struct dc_panel_patch *ppatch = NULL;
3381
3382 if (!sink)
3383 return;
3384
3385 ppatch = &sink->edid_caps.panel_patch;
3386 if (ppatch->wait_after_dpcd_poweroff_ms) {
3387 msleep(ppatch->wait_after_dpcd_poweroff_ms);
3388 drm_dbg_driver(adev_to_drm(adev),
3389 "%s: adding a %ds delay as w/a for panel\n",
3390 __func__,
3391 ppatch->wait_after_dpcd_poweroff_ms / 1000);
3392 }
3393 }
3394
3395 static int dm_resume(struct amdgpu_ip_block *ip_block)
3396 {
3397 struct amdgpu_device *adev = ip_block->adev;
3398 struct drm_device *ddev = adev_to_drm(adev);
3399 struct amdgpu_display_manager *dm = &adev->dm;
3400 struct amdgpu_dm_connector *aconnector;
3401 struct drm_connector *connector;
3402 struct drm_connector_list_iter iter;
3403 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
3404 enum dc_connection_type new_connection_type = dc_connection_none;
3405 struct dc_state *dc_state;
3406 int i, r, j;
3407 struct dc_commit_streams_params commit_params = {};
3408
3409 if (dm->dc->caps.ips_support) {
3410 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
3411 }
3412
3413 if (amdgpu_in_reset(adev)) {
3414 dc_state = dm->cached_dc_state;
3415
3416 /*
3417 * The dc->current_state is backed up into dm->cached_dc_state
3418 * before we commit 0 streams.
3419 *
3420 * DC will clear link encoder assignments on the real state
3421 * but the changes won't propagate over to the copy we made
3422 * before the 0 streams commit.
3423 *
3424 * DC expects that link encoder assignments are *not* valid
3425 * when committing a state, so as a workaround we can copy
3426 * off of the current state.
3427 *
3428 * We lose the previous assignments, but we had already
3429 	 * committed 0 streams anyway.
3430 */
3431 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
3432
3433 r = dm_dmub_hw_init(adev);
3434 if (r) {
3435 drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
3436 return r;
3437 }
3438
3439 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
3440 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
3441
3442 dc_resume(dm->dc);
3443
3444 amdgpu_dm_irq_resume_early(adev);
3445
3446 for (i = 0; i < dc_state->stream_count; i++) {
3447 dc_state->streams[i]->mode_changed = true;
3448 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
3449 dc_state->stream_status[i].plane_states[j]->update_flags.raw
3450 = 0xffffffff;
3451 }
3452 }
3453
3454 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
3455 amdgpu_dm_outbox_init(adev);
3456 dc_enable_dmub_outbox(adev->dm.dc);
3457 }
3458
3459 commit_params.streams = dc_state->streams;
3460 commit_params.stream_count = dc_state->stream_count;
3461 dc_exit_ips_for_hw_access(dm->dc);
3462 WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
3463
3464 dm_gpureset_commit_state(dm->cached_dc_state, dm);
3465
3466 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
3467
3468 dc_state_release(dm->cached_dc_state);
3469 dm->cached_dc_state = NULL;
3470
3471 amdgpu_dm_irq_resume_late(adev);
3472
3473 mutex_unlock(&dm->dc_lock);
3474
3475 /* set the backlight after a reset */
3476 for (i = 0; i < dm->num_of_edps; i++) {
3477 if (dm->backlight_dev[i])
3478 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
3479 }
3480
3481 return 0;
3482 }
3483 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
3484 dc_state_release(dm_state->context);
3485 dm_state->context = dc_state_create(dm->dc, NULL);
3486 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
3487
3488 /* Before powering on DC we need to re-initialize DMUB. */
3489 dm_dmub_hw_resume(adev);
3490
3491 /* Re-enable outbox interrupts for DPIA. */
3492 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
3493 amdgpu_dm_outbox_init(adev);
3494 dc_enable_dmub_outbox(adev->dm.dc);
3495 }
3496
3497 /* power on hardware */
3498 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
3499 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
3500
3501 /* program HPD filter */
3502 dc_resume(dm->dc);
3503
3504 /*
3505 * early enable HPD Rx IRQ, should be done before set mode as short
3506 * pulse interrupts are used for MST
3507 */
3508 amdgpu_dm_irq_resume_early(adev);
3509
3510 s3_handle_hdmi_cec(ddev, false);
3511
3512 	/* On resume we need to rewrite the MSTM control bits to enable MST */
3513 s3_handle_mst(ddev, false);
3514
3515 	/* Do detection */
3516 drm_connector_list_iter_begin(ddev, &iter);
3517 drm_for_each_connector_iter(connector, &iter) {
3518 bool ret;
3519
3520 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
3521 continue;
3522
3523 aconnector = to_amdgpu_dm_connector(connector);
3524
3525 if (!aconnector->dc_link)
3526 continue;
3527
3528 /*
3529 		 * This is the case when traversing through already-created end-sink
3530 		 * MST connectors; they should be skipped.
3531 */
3532 if (aconnector->mst_root)
3533 continue;
3534
3535 guard(mutex)(&aconnector->hpd_lock);
3536 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3537 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
3538
3539 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3540 emulated_link_detect(aconnector->dc_link);
3541 } else {
3542 guard(mutex)(&dm->dc_lock);
3543 dc_exit_ips_for_hw_access(dm->dc);
3544 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
3545 if (ret) {
3546 /* w/a delay for certain panels */
3547 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
3548 }
3549 }
3550
3551 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
3552 aconnector->fake_enable = false;
3553
3554 if (aconnector->dc_sink)
3555 dc_sink_release(aconnector->dc_sink);
3556 aconnector->dc_sink = NULL;
3557 amdgpu_dm_update_connector_after_detect(aconnector);
3558 }
3559 drm_connector_list_iter_end(&iter);
3560
3561 dm_destroy_cached_state(adev);
3562
3563 	/* Do MST topology probing after resuming cached state */
3564 drm_connector_list_iter_begin(ddev, &iter);
3565 drm_for_each_connector_iter(connector, &iter) {
3566 bool init = false;
3567
3568 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
3569 continue;
3570
3571 aconnector = to_amdgpu_dm_connector(connector);
3572 if (aconnector->dc_link->type != dc_connection_mst_branch ||
3573 aconnector->mst_root)
3574 continue;
3575
3576 scoped_guard(mutex, &aconnector->mst_mgr.lock) {
3577 init = !aconnector->mst_mgr.mst_primary;
3578 }
3579 if (init)
3580 dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
3581 aconnector->dc_link, false);
3582 else
3583 drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
3584 }
3585 drm_connector_list_iter_end(&iter);
3586
3587 amdgpu_dm_irq_resume_late(adev);
3588
3589 amdgpu_dm_smu_write_watermarks_table(adev);
3590
3591 drm_kms_helper_hotplug_event(ddev);
3592
3593 return 0;
3594 }
3595
3596 /**
3597 * DOC: DM Lifecycle
3598 *
3599  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
3600 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
3601 * the base driver's device list to be initialized and torn down accordingly.
3602 *
3603 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
3604 */
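
/*
 * Rough lifecycle sketch (hook order as generally driven by the amdgpu base
 * driver; illustrative, not a guarantee made by this file): on load the base
 * driver walks its IP block list and calls .early_init, .sw_init, .hw_init
 * and .late_init; across S3/S4 it calls .suspend and later .resume
 * (dm_suspend()/dm_resume() above); on teardown it calls .early_fini,
 * .hw_fini and .sw_fini.
 */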
3605
3606 static const struct amd_ip_funcs amdgpu_dm_funcs = {
3607 .name = "dm",
3608 .early_init = dm_early_init,
3609 .late_init = dm_late_init,
3610 .sw_init = dm_sw_init,
3611 .sw_fini = dm_sw_fini,
3612 .early_fini = amdgpu_dm_early_fini,
3613 .hw_init = dm_hw_init,
3614 .hw_fini = dm_hw_fini,
3615 .suspend = dm_suspend,
3616 .resume = dm_resume,
3617 .is_idle = dm_is_idle,
3618 .wait_for_idle = dm_wait_for_idle,
3619 .check_soft_reset = dm_check_soft_reset,
3620 .soft_reset = dm_soft_reset,
3621 .set_clockgating_state = dm_set_clockgating_state,
3622 .set_powergating_state = dm_set_powergating_state,
3623 };
3624
3625 const struct amdgpu_ip_block_version dm_ip_block = {
3626 .type = AMD_IP_BLOCK_TYPE_DCE,
3627 .major = 1,
3628 .minor = 0,
3629 .rev = 0,
3630 .funcs = &amdgpu_dm_funcs,
3631 };
3632
3633
3634 /**
3635 * DOC: atomic
3636 *
3637 * *WIP*
3638 */
3639
3640 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
3641 .fb_create = amdgpu_display_user_framebuffer_create,
3642 .get_format_info = amdgpu_dm_plane_get_format_info,
3643 .atomic_check = amdgpu_dm_atomic_check,
3644 .atomic_commit = drm_atomic_helper_commit,
3645 };
3646
3647 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
3648 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
3649 .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
3650 };
3651
3652 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
3653 {
3654 const struct drm_panel_backlight_quirk *panel_backlight_quirk;
3655 struct amdgpu_dm_backlight_caps *caps;
3656 struct drm_connector *conn_base;
3657 struct amdgpu_device *adev;
3658 struct drm_luminance_range_info *luminance_range;
3659 struct drm_device *drm;
3660
3661 if (aconnector->bl_idx == -1 ||
3662 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
3663 return;
3664
3665 conn_base = &aconnector->base;
3666 drm = conn_base->dev;
3667 adev = drm_to_adev(drm);
3668
3669 caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3670 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
3671 caps->aux_support = false;
3672
3673 if (caps->ext_caps->bits.oled == 1
3674 /*
3675 * ||
3676 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
3677 * caps->ext_caps->bits.hdr_aux_backlight_control == 1
3678 */)
3679 caps->aux_support = true;
3680
3681 if (amdgpu_backlight == 0)
3682 caps->aux_support = false;
3683 else if (amdgpu_backlight == 1)
3684 caps->aux_support = true;
3685 if (caps->aux_support)
3686 aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
3687
3688 luminance_range = &conn_base->display_info.luminance_range;
3689
3690 if (luminance_range->max_luminance)
3691 caps->aux_max_input_signal = luminance_range->max_luminance;
3692 else
3693 caps->aux_max_input_signal = 512;
3694
3695 if (luminance_range->min_luminance)
3696 caps->aux_min_input_signal = luminance_range->min_luminance;
3697 else
3698 caps->aux_min_input_signal = 1;
3699
3700 panel_backlight_quirk =
3701 drm_get_panel_backlight_quirk(aconnector->drm_edid);
3702 if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
3703 if (panel_backlight_quirk->min_brightness) {
3704 caps->min_input_signal =
3705 panel_backlight_quirk->min_brightness - 1;
3706 drm_info(drm,
3707 "Applying panel backlight quirk, min_brightness: %d\n",
3708 caps->min_input_signal);
3709 }
3710 if (panel_backlight_quirk->brightness_mask) {
3711 drm_info(drm,
3712 "Applying panel backlight quirk, brightness_mask: 0x%X\n",
3713 panel_backlight_quirk->brightness_mask);
3714 caps->brightness_mask =
3715 panel_backlight_quirk->brightness_mask;
3716 }
3717 }
3718 }
3719
3720 DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
3721
3722 void amdgpu_dm_update_connector_after_detect(
3723 struct amdgpu_dm_connector *aconnector)
3724 {
3725 struct drm_connector *connector = &aconnector->base;
3726 struct dc_sink *sink __free(sink_release) = NULL;
3727 struct drm_device *dev = connector->dev;
3728
3729 /* MST handled by drm_mst framework */
3730 if (aconnector->mst_mgr.mst_state == true)
3731 return;
3732
3733 sink = aconnector->dc_link->local_sink;
3734 if (sink)
3735 dc_sink_retain(sink);
3736
3737 /*
3738 * Edid mgmt connector gets first update only in mode_valid hook and then
3739 	 * the connector sink is set to either a fake or a physical sink, depending on link status.
3740 * Skip if already done during boot.
3741 */
3742 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3743 && aconnector->dc_em_sink) {
3744
3745 /*
3746 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a stream
3747 		 * because connector->sink is set to NULL on resume
3748 */
3749 guard(mutex)(&dev->mode_config.mutex);
3750
3751 if (sink) {
3752 if (aconnector->dc_sink) {
3753 amdgpu_dm_update_freesync_caps(connector, NULL);
3754 /*
3755 				 * The retain and release below bump up the refcount for the sink
3756 				 * because the link no longer points to it after disconnect; otherwise
3757 				 * the next CRTC-to-connector reshuffle by the UMD would trigger an
3758 				 * unwanted dc_sink release.
3759 */
3760 dc_sink_release(aconnector->dc_sink);
3761 }
3762 aconnector->dc_sink = sink;
3763 dc_sink_retain(aconnector->dc_sink);
3764 amdgpu_dm_update_freesync_caps(connector,
3765 aconnector->drm_edid);
3766 } else {
3767 amdgpu_dm_update_freesync_caps(connector, NULL);
3768 if (!aconnector->dc_sink) {
3769 aconnector->dc_sink = aconnector->dc_em_sink;
3770 dc_sink_retain(aconnector->dc_sink);
3771 }
3772 }
3773
3774 return;
3775 }
3776
3777 /*
3778 * TODO: temporary guard to look for proper fix
3779 * if this sink is MST sink, we should not do anything
3780 */
3781 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
3782 return;
3783
3784 if (aconnector->dc_sink == sink) {
3785 /*
3786 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3787 * Do nothing!!
3788 */
3789 drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n",
3790 aconnector->connector_id);
3791 return;
3792 }
3793
3794 drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3795 aconnector->connector_id, aconnector->dc_sink, sink);
3796
3797 guard(mutex)(&dev->mode_config.mutex);
3798
3799 /*
3800 * 1. Update status of the drm connector
3801 * 2. Send an event and let userspace tell us what to do
3802 */
3803 if (sink) {
3804 /*
3805 * TODO: check if we still need the S3 mode update workaround.
3806 * If yes, put it here.
3807 */
3808 if (aconnector->dc_sink) {
3809 amdgpu_dm_update_freesync_caps(connector, NULL);
3810 dc_sink_release(aconnector->dc_sink);
3811 }
3812
3813 aconnector->dc_sink = sink;
3814 dc_sink_retain(aconnector->dc_sink);
3815 if (sink->dc_edid.length == 0) {
3816 aconnector->drm_edid = NULL;
3817 hdmi_cec_unset_edid(aconnector);
3818 if (aconnector->dc_link->aux_mode) {
3819 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3820 }
3821 } else {
3822 const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid;
3823
3824 aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
3825 drm_edid_connector_update(connector, aconnector->drm_edid);
3826
3827 hdmi_cec_set_edid(aconnector);
3828 if (aconnector->dc_link->aux_mode)
3829 drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
3830 connector->display_info.source_physical_address);
3831 }
3832
3833 if (!aconnector->timing_requested) {
3834 aconnector->timing_requested =
3835 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3836 if (!aconnector->timing_requested)
3837 drm_err(dev,
3838 				"failed to create aconnector->timing_requested\n");
3839 }
3840
3841 amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
3842 update_connector_ext_caps(aconnector);
3843 } else {
3844 hdmi_cec_unset_edid(aconnector);
3845 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3846 amdgpu_dm_update_freesync_caps(connector, NULL);
3847 aconnector->num_modes = 0;
3848 dc_sink_release(aconnector->dc_sink);
3849 aconnector->dc_sink = NULL;
3850 drm_edid_free(aconnector->drm_edid);
3851 aconnector->drm_edid = NULL;
3852 kfree(aconnector->timing_requested);
3853 aconnector->timing_requested = NULL;
3854 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3855 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3856 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3857 }
3858
3859 update_subconnector_property(aconnector);
3860 }
3861
3862 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3863 {
3864 struct drm_connector *connector = &aconnector->base;
3865 struct drm_device *dev = connector->dev;
3866 enum dc_connection_type new_connection_type = dc_connection_none;
3867 struct amdgpu_device *adev = drm_to_adev(dev);
3868 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3869 struct dc *dc = aconnector->dc_link->ctx->dc;
3870 bool ret = false;
3871
3872 if (adev->dm.disable_hpd_irq)
3873 return;
3874
3875 /*
3876 	 * In case of failure, or for MST, there is no need to update the connector
3877 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
3878 */
3879 guard(mutex)(&aconnector->hpd_lock);
3880
3881 if (adev->dm.hdcp_workqueue) {
3882 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3883 dm_con_state->update_hdcp = true;
3884 }
3885 if (aconnector->fake_enable)
3886 aconnector->fake_enable = false;
3887
3888 aconnector->timing_changed = false;
3889
3890 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3891 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
3892
3893 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3894 emulated_link_detect(aconnector->dc_link);
3895
3896 drm_modeset_lock_all(dev);
3897 dm_restore_drm_connector_state(dev, connector);
3898 drm_modeset_unlock_all(dev);
3899
3900 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3901 drm_kms_helper_connector_hotplug_event(connector);
3902 } else {
3903 scoped_guard(mutex, &adev->dm.dc_lock) {
3904 dc_exit_ips_for_hw_access(dc);
3905 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3906 }
3907 if (ret) {
3908 /* w/a delay for certain panels */
3909 apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
3910 amdgpu_dm_update_connector_after_detect(aconnector);
3911
3912 drm_modeset_lock_all(dev);
3913 dm_restore_drm_connector_state(dev, connector);
3914 drm_modeset_unlock_all(dev);
3915
3916 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3917 drm_kms_helper_connector_hotplug_event(connector);
3918 }
3919 }
3920 }
3921
3922 static void handle_hpd_irq(void *param)
3923 {
3924 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3925
3926 handle_hpd_irq_helper(aconnector);
3927
3928 }
3929
3930 static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
3931 union hpd_irq_data hpd_irq_data)
3932 {
3933 struct hpd_rx_irq_offload_work *offload_work =
3934 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3935
3936 if (!offload_work) {
3937 drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
3938 return;
3939 }
3940
3941 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3942 offload_work->data = hpd_irq_data;
3943 offload_work->offload_wq = offload_wq;
3944 offload_work->adev = adev;
3945
3946 queue_work(offload_wq->wq, &offload_work->work);
3947 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3948 }
3949
3950 static void handle_hpd_rx_irq(void *param)
3951 {
3952 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3953 struct drm_connector *connector = &aconnector->base;
3954 struct drm_device *dev = connector->dev;
3955 struct dc_link *dc_link = aconnector->dc_link;
3956 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3957 bool result = false;
3958 enum dc_connection_type new_connection_type = dc_connection_none;
3959 struct amdgpu_device *adev = drm_to_adev(dev);
3960 union hpd_irq_data hpd_irq_data;
3961 bool link_loss = false;
3962 bool has_left_work = false;
3963 int idx = dc_link->link_index;
3964 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3965 struct dc *dc = aconnector->dc_link->ctx->dc;
3966
3967 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3968
3969 if (adev->dm.disable_hpd_irq)
3970 return;
3971
3972 /*
3973 	 * TODO: Temporarily add a mutex so the HPD interrupt does not hit a GPIO
3974 	 * conflict; once an i2c helper is implemented, this mutex should be
3975 	 * retired.
3976 */
3977 mutex_lock(&aconnector->hpd_lock);
3978
3979 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3980 &link_loss, true, &has_left_work);
3981
3982 if (!has_left_work)
3983 goto out;
3984
3985 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3986 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
3987 goto out;
3988 }
3989
3990 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3991 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3992 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3993 bool skip = false;
3994
3995 /*
3996 * DOWN_REP_MSG_RDY is also handled by polling method
3997 * mgr->cbs->poll_hpd_irq()
3998 */
3999 spin_lock(&offload_wq->offload_lock);
4000 skip = offload_wq->is_handling_mst_msg_rdy_event;
4001
4002 if (!skip)
4003 offload_wq->is_handling_mst_msg_rdy_event = true;
4004
4005 spin_unlock(&offload_wq->offload_lock);
4006
4007 if (!skip)
4008 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
4009
4010 goto out;
4011 }
4012
4013 if (link_loss) {
4014 bool skip = false;
4015
4016 spin_lock(&offload_wq->offload_lock);
4017 skip = offload_wq->is_handling_link_loss;
4018
4019 if (!skip)
4020 offload_wq->is_handling_link_loss = true;
4021
4022 spin_unlock(&offload_wq->offload_lock);
4023
4024 if (!skip)
4025 schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
4026
4027 goto out;
4028 }
4029 }
4030
4031 out:
4032 if (result && !is_mst_root_connector) {
4033 /* Downstream Port status changed. */
4034 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
4035 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
4036
4037 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4038 emulated_link_detect(dc_link);
4039
4040 if (aconnector->fake_enable)
4041 aconnector->fake_enable = false;
4042
4043 amdgpu_dm_update_connector_after_detect(aconnector);
4044
4045
4046 drm_modeset_lock_all(dev);
4047 dm_restore_drm_connector_state(dev, connector);
4048 drm_modeset_unlock_all(dev);
4049
4050 drm_kms_helper_connector_hotplug_event(connector);
4051 } else {
4052 bool ret = false;
4053
4054 mutex_lock(&adev->dm.dc_lock);
4055 dc_exit_ips_for_hw_access(dc);
4056 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
4057 mutex_unlock(&adev->dm.dc_lock);
4058
4059 if (ret) {
4060 if (aconnector->fake_enable)
4061 aconnector->fake_enable = false;
4062
4063 amdgpu_dm_update_connector_after_detect(aconnector);
4064
4065 drm_modeset_lock_all(dev);
4066 dm_restore_drm_connector_state(dev, connector);
4067 drm_modeset_unlock_all(dev);
4068
4069 drm_kms_helper_connector_hotplug_event(connector);
4070 }
4071 }
4072 }
4073 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
4074 if (adev->dm.hdcp_workqueue)
4075 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
4076 }
4077
4078 if (dc_link->type != dc_connection_mst_branch)
4079 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4080
4081 mutex_unlock(&aconnector->hpd_lock);
4082 }
4083
4084 static int register_hpd_handlers(struct amdgpu_device *adev)
4085 {
4086 struct drm_device *dev = adev_to_drm(adev);
4087 struct drm_connector *connector;
4088 struct amdgpu_dm_connector *aconnector;
4089 const struct dc_link *dc_link;
4090 struct dc_interrupt_params int_params = {0};
4091
4092 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
4093 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
4094
4095 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
4096 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
4097 dmub_hpd_callback, true)) {
4098 			drm_err(adev_to_drm(adev), "failed to register dmub hpd callback");
4099 return -EINVAL;
4100 }
4101
4102 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
4103 dmub_hpd_callback, true)) {
4104 			drm_err(adev_to_drm(adev), "failed to register dmub hpd callback");
4105 return -EINVAL;
4106 }
4107
4108 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
4109 dmub_hpd_sense_callback, true)) {
4110 			drm_err(adev_to_drm(adev), "failed to register dmub hpd sense callback");
4111 return -EINVAL;
4112 }
4113 }
4114
4115 list_for_each_entry(connector,
4116 &dev->mode_config.connector_list, head) {
4117
4118 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
4119 continue;
4120
4121 aconnector = to_amdgpu_dm_connector(connector);
4122 dc_link = aconnector->dc_link;
4123
4124 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
4125 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
4126 int_params.irq_source = dc_link->irq_source_hpd;
4127
4128 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4129 int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
4130 int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
4131 drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
4132 return -EINVAL;
4133 }
4134
4135 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4136 handle_hpd_irq, (void *) aconnector))
4137 return -ENOMEM;
4138 }
4139
4140 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
4141
4142 /* Also register for DP short pulse (hpd_rx). */
4143 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
4144 int_params.irq_source = dc_link->irq_source_hpd_rx;
4145
4146 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4147 int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
4148 int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
4149 drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
4150 return -EINVAL;
4151 }
4152
4153 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4154 handle_hpd_rx_irq, (void *) aconnector))
4155 return -ENOMEM;
4156 }
4157 }
4158 return 0;
4159 }
4160
4161 #if defined(CONFIG_DRM_AMD_DC_SI)
4162 /* Register IRQ sources and initialize IRQ callbacks */
4163 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
4164 {
4165 struct dc *dc = adev->dm.dc;
4166 struct common_irq_params *c_irq_params;
4167 struct dc_interrupt_params int_params = {0};
4168 int r;
4169 int i;
4170 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
4171
4172 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
4173 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
4174
4175 /*
4176 * Actions of amdgpu_irq_add_id():
4177 * 1. Register a set() function with base driver.
4178 * Base driver will call set() function to enable/disable an
4179 * interrupt in DC hardware.
4180 * 2. Register amdgpu_dm_irq_handler().
4181 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
4182 * coming from DC hardware.
4183 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
4184 * for acknowledging and handling.
4185 */
4186
4187 /* Use VBLANK interrupt */
4188 for (i = 0; i < adev->mode_info.num_crtc; i++) {
4189 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
4190 if (r) {
4191 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
4192 return r;
4193 }
4194
4195 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4196 int_params.irq_source =
4197 dc_interrupt_to_irq_source(dc, i + 1, 0);
4198
4199 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4200 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
4201 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
4202 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
4203 return -EINVAL;
4204 }
4205
4206 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4207
4208 c_irq_params->adev = adev;
4209 c_irq_params->irq_src = int_params.irq_source;
4210
4211 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4212 dm_crtc_high_irq, c_irq_params))
4213 return -ENOMEM;
4214 }
4215
4216 /* Use GRPH_PFLIP interrupt */
4217 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
4218 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
4219 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4220 if (r) {
4221 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
4222 return r;
4223 }
4224
4225 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4226 int_params.irq_source =
4227 dc_interrupt_to_irq_source(dc, i, 0);
4228
4229 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4230 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
4231 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
4232 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
4233 return -EINVAL;
4234 }
4235
4236 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4237
4238 c_irq_params->adev = adev;
4239 c_irq_params->irq_src = int_params.irq_source;
4240
4241 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4242 dm_pflip_high_irq, c_irq_params))
4243 return -ENOMEM;
4244 }
4245
4246 /* HPD */
4247 r = amdgpu_irq_add_id(adev, client_id,
4248 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4249 if (r) {
4250 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
4251 return r;
4252 }
4253
4254 r = register_hpd_handlers(adev);
4255
4256 return r;
4257 }
4258 #endif
4259
4260 /* Register IRQ sources and initialize IRQ callbacks */
4261 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
4262 {
4263 struct dc *dc = adev->dm.dc;
4264 struct common_irq_params *c_irq_params;
4265 struct dc_interrupt_params int_params = {0};
4266 int r;
4267 int i;
4268 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
4269
4270 if (adev->family >= AMDGPU_FAMILY_AI)
4271 client_id = SOC15_IH_CLIENTID_DCE;
4272
4273 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
4274 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
4275
4276 /*
4277 * Actions of amdgpu_irq_add_id():
4278 * 1. Register a set() function with base driver.
4279 * Base driver will call set() function to enable/disable an
4280 * interrupt in DC hardware.
4281 * 2. Register amdgpu_dm_irq_handler().
4282 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
4283 * coming from DC hardware.
4284 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
4285 * for acknowledging and handling.
4286 */
4287
4288 /* Use VBLANK interrupt */
4289 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
4290 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4291 if (r) {
4292 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
4293 return r;
4294 }
4295
4296 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4297 int_params.irq_source =
4298 dc_interrupt_to_irq_source(dc, i, 0);
4299
4300 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4301 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
4302 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
4303 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
4304 return -EINVAL;
4305 }
4306
4307 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4308
4309 c_irq_params->adev = adev;
4310 c_irq_params->irq_src = int_params.irq_source;
4311
4312 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4313 dm_crtc_high_irq, c_irq_params))
4314 return -ENOMEM;
4315 }
4316
4317 /* Use VUPDATE interrupt */
4318 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
4319 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
4320 if (r) {
4321 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
4322 return r;
4323 }
4324
4325 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4326 int_params.irq_source =
4327 dc_interrupt_to_irq_source(dc, i, 0);
4328
4329 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4330 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
4331 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
4332 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
4333 return -EINVAL;
4334 }
4335
4336 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
4337
4338 c_irq_params->adev = adev;
4339 c_irq_params->irq_src = int_params.irq_source;
4340
4341 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4342 dm_vupdate_high_irq, c_irq_params))
4343 return -ENOMEM;
4344 }
4345
4346 /* Use GRPH_PFLIP interrupt */
4347 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
4348 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
4349 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4350 if (r) {
4351 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
4352 return r;
4353 }
4354
4355 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4356 int_params.irq_source =
4357 dc_interrupt_to_irq_source(dc, i, 0);
4358
4359 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4360 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
4361 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
4362 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
4363 return -EINVAL;
4364 }
4365
4366 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4367
4368 c_irq_params->adev = adev;
4369 c_irq_params->irq_src = int_params.irq_source;
4370
4371 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4372 dm_pflip_high_irq, c_irq_params))
4373 return -ENOMEM;
4374 }
4375
4376 /* HPD */
4377 r = amdgpu_irq_add_id(adev, client_id,
4378 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4379 if (r) {
4380 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
4381 return r;
4382 }
4383
4384 r = register_hpd_handlers(adev);
4385
4386 return r;
4387 }
4388
4389 /* Register IRQ sources and initialize IRQ callbacks */
4390 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
4391 {
4392 struct dc *dc = adev->dm.dc;
4393 struct common_irq_params *c_irq_params;
4394 struct dc_interrupt_params int_params = {0};
4395 int r;
4396 int i;
4397 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
4398 static const unsigned int vrtl_int_srcid[] = {
4399 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
4400 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
4401 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
4402 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
4403 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
4404 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
4405 };
4406 #endif
4407
4408 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
4409 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
4410
4411 /*
4412 * Actions of amdgpu_irq_add_id():
4413 * 1. Register a set() function with base driver.
4414 * Base driver will call set() function to enable/disable an
4415 * interrupt in DC hardware.
4416 * 2. Register amdgpu_dm_irq_handler().
4417 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
4418 * coming from DC hardware.
4419 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
4420 * for acknowledging and handling.
4421 */
4422
4423 /* Use VSTARTUP interrupt */
4424 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
4425 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
4426 i++) {
4427 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
4428
4429 if (r) {
4430 drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
4431 return r;
4432 }
4433
4434 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4435 int_params.irq_source =
4436 dc_interrupt_to_irq_source(dc, i, 0);
4437
4438 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4439 int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
4440 int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
4441 drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
4442 return -EINVAL;
4443 }
4444
4445 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4446
4447 c_irq_params->adev = adev;
4448 c_irq_params->irq_src = int_params.irq_source;
4449
4450 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4451 dm_crtc_high_irq, c_irq_params))
4452 return -ENOMEM;
4453 }
4454
4455 /* Use otg vertical line interrupt */
4456 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
4457 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
4458 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
4459 vrtl_int_srcid[i], &adev->vline0_irq);
4460
4461 if (r) {
4462 drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
4463 return r;
4464 }
4465
4466 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4467 int_params.irq_source =
4468 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
4469
4470 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4471 int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
4472 int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
4473 drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
4474 return -EINVAL;
4475 }
4476
4477 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
4478 - DC_IRQ_SOURCE_DC1_VLINE0];
4479
4480 c_irq_params->adev = adev;
4481 c_irq_params->irq_src = int_params.irq_source;
4482
4483 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4484 dm_dcn_vertical_interrupt0_high_irq,
4485 c_irq_params))
4486 return -ENOMEM;
4487 }
4488 #endif
4489
4490 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
4491 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
4492 * to trigger at end of each vblank, regardless of state of the lock,
4493 * matching DCE behaviour.
4494 */
4495 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
4496 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
4497 i++) {
4498 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
4499
4500 if (r) {
4501 drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
4502 return r;
4503 }
4504
4505 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4506 int_params.irq_source =
4507 dc_interrupt_to_irq_source(dc, i, 0);
4508
4509 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4510 int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
4511 int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
4512 drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
4513 return -EINVAL;
4514 }
4515
4516 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
4517
4518 c_irq_params->adev = adev;
4519 c_irq_params->irq_src = int_params.irq_source;
4520
4521 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4522 dm_vupdate_high_irq, c_irq_params))
4523 return -ENOMEM;
4524 }
4525
4526 /* Use GRPH_PFLIP interrupt */
4527 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
4528 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
4529 i++) {
4530 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
4531 if (r) {
4532 drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
4533 return r;
4534 }
4535
4536 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
4537 int_params.irq_source =
4538 dc_interrupt_to_irq_source(dc, i, 0);
4539
4540 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
4541 int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
4542 int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
4543 drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
4544 return -EINVAL;
4545 }
4546
4547 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4548
4549 c_irq_params->adev = adev;
4550 c_irq_params->irq_src = int_params.irq_source;
4551
4552 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4553 dm_pflip_high_irq, c_irq_params))
4554 return -ENOMEM;
4555 }
4556
4557 /* HPD */
4558 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
4559 &adev->hpd_irq);
4560 if (r) {
4561 drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
4562 return r;
4563 }
4564
4565 r = register_hpd_handlers(adev);
4566
4567 return r;
4568 }
4569 /* Register Outbox IRQ sources and initialize IRQ callbacks */
4570 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
4571 {
4572 struct dc *dc = adev->dm.dc;
4573 struct common_irq_params *c_irq_params;
4574 struct dc_interrupt_params int_params = {0};
4575 int r, i;
4576
4577 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
4578 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
4579
4580 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
4581 &adev->dmub_outbox_irq);
4582 if (r) {
4583 drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
4584 return r;
4585 }
4586
4587 if (dc->ctx->dmub_srv) {
4588 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
4589 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
4590 int_params.irq_source =
4591 dc_interrupt_to_irq_source(dc, i, 0);
4592
4593 c_irq_params = &adev->dm.dmub_outbox_params[0];
4594
4595 c_irq_params->adev = adev;
4596 c_irq_params->irq_src = int_params.irq_source;
4597
4598 if (!amdgpu_dm_irq_register_interrupt(adev, &int_params,
4599 dm_dmub_outbox1_low_irq, c_irq_params))
4600 return -ENOMEM;
4601 }
4602
4603 return 0;
4604 }
4605
4606 /*
4607 * Acquires the lock for the atomic state object and returns
4608 * the new atomic state.
4609 *
4610 * This should only be called during atomic check.
4611 */
4612 int dm_atomic_get_state(struct drm_atomic_state *state,
4613 struct dm_atomic_state **dm_state)
4614 {
4615 struct drm_device *dev = state->dev;
4616 struct amdgpu_device *adev = drm_to_adev(dev);
4617 struct amdgpu_display_manager *dm = &adev->dm;
4618 struct drm_private_state *priv_state;
4619
4620 if (*dm_state)
4621 return 0;
4622
4623 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
4624 if (IS_ERR(priv_state))
4625 return PTR_ERR(priv_state);
4626
4627 *dm_state = to_dm_atomic_state(priv_state);
4628
4629 return 0;
4630 }
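
/*
 * Minimal usage sketch (illustrative only; the locals below are hypothetical):
 * a caller in the atomic check path acquires the private object state once and
 * then works on dm_state->context under the acquired lock:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... modify dm_state->context for this commit ...
 */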
4631
4632 static struct dm_atomic_state *
4633 dm_atomic_get_new_state(struct drm_atomic_state *state)
4634 {
4635 struct drm_device *dev = state->dev;
4636 struct amdgpu_device *adev = drm_to_adev(dev);
4637 struct amdgpu_display_manager *dm = &adev->dm;
4638 struct drm_private_obj *obj;
4639 struct drm_private_state *new_obj_state;
4640 int i;
4641
4642 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
4643 if (obj->funcs == dm->atomic_obj.funcs)
4644 return to_dm_atomic_state(new_obj_state);
4645 }
4646
4647 return NULL;
4648 }
4649
4650 static struct drm_private_state *
4651 dm_atomic_duplicate_state(struct drm_private_obj *obj)
4652 {
4653 struct dm_atomic_state *old_state, *new_state;
4654
4655 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
4656 if (!new_state)
4657 return NULL;
4658
4659 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
4660
4661 old_state = to_dm_atomic_state(obj->state);
4662
4663 if (old_state && old_state->context)
4664 new_state->context = dc_state_create_copy(old_state->context);
4665
4666 if (!new_state->context) {
4667 kfree(new_state);
4668 return NULL;
4669 }
4670
4671 return &new_state->base;
4672 }
4673
4674 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
4675 struct drm_private_state *state)
4676 {
4677 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4678
4679 if (dm_state && dm_state->context)
4680 dc_state_release(dm_state->context);
4681
4682 kfree(dm_state);
4683 }
4684
4685 static struct drm_private_state_funcs dm_atomic_state_funcs = {
4686 .atomic_duplicate_state = dm_atomic_duplicate_state,
4687 .atomic_destroy_state = dm_atomic_destroy_state,
4688 };
4689
4690 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
4691 {
4692 struct dm_atomic_state *state;
4693 int r;
4694
4695 adev->mode_info.mode_config_initialized = true;
4696
4697 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
4698 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4699
4700 adev_to_drm(adev)->mode_config.max_width = 16384;
4701 adev_to_drm(adev)->mode_config.max_height = 16384;
4702
4703 adev_to_drm(adev)->mode_config.preferred_depth = 24;
4704 if (adev->asic_type == CHIP_HAWAII)
4705 /* disable prefer shadow for now due to hibernation issues */
4706 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
4707 else
4708 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
4709 /* indicates support for immediate flip */
4710 adev_to_drm(adev)->mode_config.async_page_flip = true;
4711
4712 state = kzalloc(sizeof(*state), GFP_KERNEL);
4713 if (!state)
4714 return -ENOMEM;
4715
4716 state->context = dc_state_create_current_copy(adev->dm.dc);
4717 if (!state->context) {
4718 kfree(state);
4719 return -ENOMEM;
4720 }
4721
4722 drm_atomic_private_obj_init(adev_to_drm(adev),
4723 &adev->dm.atomic_obj,
4724 &state->base,
4725 &dm_atomic_state_funcs);
4726
4727 r = amdgpu_display_modeset_create_props(adev);
4728 if (r) {
4729 dc_state_release(state->context);
4730 kfree(state);
4731 return r;
4732 }
4733
4734 #ifdef AMD_PRIVATE_COLOR
4735 if (amdgpu_dm_create_color_properties(adev)) {
4736 dc_state_release(state->context);
4737 kfree(state);
4738 return -ENOMEM;
4739 }
4740 #endif
4741
4742 r = amdgpu_dm_audio_init(adev);
4743 if (r) {
4744 dc_state_release(state->context);
4745 kfree(state);
4746 return r;
4747 }
4748
4749 return 0;
4750 }
4751
4752 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4753 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
4754 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
4755 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
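
/*
 * With the defaults above, AMDGPU_DM_MIN_SPREAD evaluates to
 * (255 - 12) / 2 = 121; firmware-reported backlight ranges narrower than
 * that are treated as implausible in amdgpu_dm_update_backlight_caps() below.
 */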
4756
4757 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4758 int bl_idx)
4759 {
4760 struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
4761
4762 if (caps->caps_valid)
4763 return;
4764
4765 #if defined(CONFIG_ACPI)
4766 amdgpu_acpi_get_backlight_caps(caps);
4767
4768 /* validate the firmware value is sane */
4769 if (caps->caps_valid) {
4770 int spread = caps->max_input_signal - caps->min_input_signal;
4771
4772 if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
4773 caps->min_input_signal < 0 ||
4774 spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
4775 spread < AMDGPU_DM_MIN_SPREAD) {
4776 DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
4777 caps->min_input_signal, caps->max_input_signal);
4778 caps->caps_valid = false;
4779 }
4780 }
4781
4782 if (!caps->caps_valid) {
4783 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4784 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4785 caps->caps_valid = true;
4786 }
4787 #else
4788 if (caps->aux_support)
4789 return;
4790
4791 caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4792 caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4793 caps->caps_valid = true;
4794 #endif
4795 }
4796
4797 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4798 unsigned int *min, unsigned int *max)
4799 {
4800 if (!caps)
4801 return 0;
4802
4803 if (caps->aux_support) {
4804 // Firmware limits are in nits, DC API wants millinits.
4805 *max = 1000 * caps->aux_max_input_signal;
4806 *min = 1000 * caps->aux_min_input_signal;
4807 } else {
4808 // Firmware limits are 8-bit, PWM control is 16-bit.
4809 *max = 0x101 * caps->max_input_signal;
4810 *min = 0x101 * caps->min_input_signal;
4811 }
4812 return 1;
4813 }
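
/*
 * Worked example of the unit conversions above (values are illustrative):
 * on the AUX path, firmware limits of 4/500 nits become 4000/500000
 * millinits; on the PWM path, 8-bit firmware limits of 12/255 become
 * 16-bit values 0x101 * 12 = 0x0c0c and 0x101 * 255 = 0xffff.
 */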
4814
4815 /* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
4816 static inline u32 scale_input_to_fw(int min, int max, u64 input)
4817 {
4818 return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
4819 }
4820
4821 /* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
4822 static inline u32 scale_fw_to_input(int min, int max, u64 input)
4823 {
4824 return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
4825 }
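
/*
 * Usage note (sketch of how convert_custom_brightness() below uses the two
 * helpers above): the user brightness is first mapped onto the firmware scale
 * with scale_input_to_fw() so it can be compared against the input_signal
 * values in caps->luminance_data[], and the luminance-adjusted result is
 * mapped back with scale_fw_to_input() before being returned to the caller.
 */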
4826
4827 static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
4828 unsigned int min, unsigned int max,
4829 uint32_t *user_brightness)
4830 {
4831 u32 brightness = scale_input_to_fw(min, max, *user_brightness);
4832 u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
4833 int left, right;
4834
4835 if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
4836 return;
4837
4838 if (!caps->data_points)
4839 return;
4840
4841 /*
4842 	 * Handle the case where brightness is below the first data point.
4843 	 * Interpolate between (0, 0) and (first_signal, first_lum).
4844 */
4845 if (brightness < caps->luminance_data[0].input_signal) {
4846 lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
4847 caps->luminance_data[0].input_signal);
4848 goto scale;
4849 }
4850
4851 left = 0;
4852 right = caps->data_points - 1;
4853 while (left <= right) {
4854 int mid = left + (right - left) / 2;
4855 u8 signal = caps->luminance_data[mid].input_signal;
4856
4857 /* Exact match found */
4858 if (signal == brightness) {
4859 lum = caps->luminance_data[mid].luminance;
4860 goto scale;
4861 }
4862
4863 if (signal < brightness)
4864 left = mid + 1;
4865 else
4866 right = mid - 1;
4867 }
4868
4869 /* verify bound */
4870 if (left >= caps->data_points)
4871 left = caps->data_points - 1;
4872
4873 /* At this point, left > right */
4874 lower_signal = caps->luminance_data[right].input_signal;
4875 upper_signal = caps->luminance_data[left].input_signal;
4876 lower_lum = caps->luminance_data[right].luminance;
4877 upper_lum = caps->luminance_data[left].luminance;
4878
4879 /* interpolate */
4880 if (right == left || !lower_lum)
4881 lum = upper_lum;
4882 else
4883 lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
4884 (brightness - lower_signal),
4885 upper_signal - lower_signal);
4886 scale:
4887 *user_brightness = scale_fw_to_input(min, max,
4888 DIV_ROUND_CLOSEST(lum * brightness, 101));
4889 }
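
/*
 * Worked interpolation example (hypothetical data points, not taken from any
 * real panel): with luminance_data entries (input_signal, luminance) of
 * (10, 20) and (30, 60), a firmware-scale brightness of 20 falls between them
 * and yields lum = 20 + (60 - 20) * (20 - 10) / (30 - 10) = 40.
 */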
4890
4891 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4892 uint32_t brightness)
4893 {
4894 unsigned int min, max;
4895
4896 if (!get_brightness_range(caps, &min, &max))
4897 return brightness;
4898
4899 convert_custom_brightness(caps, min, max, &brightness);
4900
4901 // Rescale 0..max to min..max
4902 return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
4903 }
4904
4905 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4906 uint32_t brightness)
4907 {
4908 unsigned int min, max;
4909
4910 if (!get_brightness_range(caps, &min, &max))
4911 return brightness;
4912
4913 if (brightness < min)
4914 return 0;
4915 // Rescale min..max to 0..max
4916 return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
4917 max - min);
4918 }
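
/*
 * The two converters above are approximate inverses: convert_brightness_from_user()
 * applies the optional custom curve and rescales a user value in [0..max] onto the
 * hardware range [min..max], while convert_brightness_to_user() rescales a hardware
 * value in [min..max] back onto [0..max]. E.g. with min = 3084 and max = 65535 (the
 * PWM example above) and no custom curve, a user value of 65535 maps to 65535 and a
 * hardware value of 3084 maps back to 0.
 */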
4919
4920 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4921 int bl_idx,
4922 u32 user_brightness)
4923 {
4924 struct amdgpu_dm_backlight_caps *caps;
4925 struct dc_link *link;
4926 u32 brightness;
4927 bool rc, reallow_idle = false;
4928
4929 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4930 caps = &dm->backlight_caps[bl_idx];
4931
4932 dm->brightness[bl_idx] = user_brightness;
4933 /* update scratch register */
4934 if (bl_idx == 0)
4935 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4936 brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
4937 link = (struct dc_link *)dm->backlight_link[bl_idx];
4938
4939 /* Apply brightness quirk */
4940 if (caps->brightness_mask)
4941 brightness |= caps->brightness_mask;
4942
4943 /* Change brightness based on AUX property */
4944 mutex_lock(&dm->dc_lock);
4945 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
4946 dc_allow_idle_optimizations(dm->dc, false);
4947 reallow_idle = true;
4948 }
4949
4950 if (trace_amdgpu_dm_brightness_enabled()) {
4951 trace_amdgpu_dm_brightness(__builtin_return_address(0),
4952 user_brightness,
4953 brightness,
4954 caps->aux_support,
4955 power_supply_is_system_supplied() > 0);
4956 }
4957
4958 if (caps->aux_support) {
4959 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4960 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4961 if (!rc)
4962 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4963 } else {
4964 struct set_backlight_level_params backlight_level_params = { 0 };
4965
4966 backlight_level_params.backlight_pwm_u16_16 = brightness;
4967 backlight_level_params.transition_time_in_ms = 0;
4968
4969 rc = dc_link_set_backlight_level(link, &backlight_level_params);
4970 if (!rc)
4971 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4972 }
4973
4974 if (dm->dc->caps.ips_support && reallow_idle)
4975 dc_allow_idle_optimizations(dm->dc, true);
4976
4977 mutex_unlock(&dm->dc_lock);
4978
4979 if (rc)
4980 dm->actual_brightness[bl_idx] = user_brightness;
4981 }
4982
4983 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4984 {
4985 struct amdgpu_display_manager *dm = bl_get_data(bd);
4986 int i;
4987
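	/* Find which eDP panel this backlight device belongs to; fall back to
	 * the first panel if no match is found.
	 */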
4988 for (i = 0; i < dm->num_of_edps; i++) {
4989 if (bd == dm->backlight_dev[i])
4990 break;
4991 }
4992 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4993 i = 0;
4994 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4995
4996 return 0;
4997 }
4998
4999 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
5000 int bl_idx)
5001 {
5002 int ret;
5003 struct amdgpu_dm_backlight_caps caps;
5004 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
5005
5006 amdgpu_dm_update_backlight_caps(dm, bl_idx);
5007 caps = dm->backlight_caps[bl_idx];
5008
5009 if (caps.aux_support) {
5010 u32 avg, peak;
5011
5012 if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
5013 return dm->brightness[bl_idx];
5014 return convert_brightness_to_user(&caps, avg);
5015 }
5016
5017 ret = dc_link_get_backlight_level(link);
5018
5019 if (ret == DC_ERROR_UNEXPECTED)
5020 return dm->brightness[bl_idx];
5021
5022 return convert_brightness_to_user(&caps, ret);
5023 }
5024
5025 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
5026 {
5027 struct amdgpu_display_manager *dm = bl_get_data(bd);
5028 int i;
5029
5030 for (i = 0; i < dm->num_of_edps; i++) {
5031 if (bd == dm->backlight_dev[i])
5032 break;
5033 }
5034 if (i >= AMDGPU_DM_MAX_NUM_EDP)
5035 i = 0;
5036 return amdgpu_dm_backlight_get_level(dm, i);
5037 }
5038
5039 static const struct backlight_ops amdgpu_dm_backlight_ops = {
5040 .options = BL_CORE_SUSPENDRESUME,
5041 .get_brightness = amdgpu_dm_backlight_get_brightness,
5042 .update_status = amdgpu_dm_backlight_update_status,
5043 };
5044
5045 static void
5046 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
5047 {
5048 struct drm_device *drm = aconnector->base.dev;
5049 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
5050 struct backlight_properties props = { 0 };
5051 struct amdgpu_dm_backlight_caps *caps;
5052 char bl_name[16];
5053 int min, max;
5054
5055 if (aconnector->bl_idx == -1)
5056 return;
5057
5058 if (!acpi_video_backlight_use_native()) {
5059 drm_info(drm, "Skipping amdgpu DM backlight registration\n");
5060 /* Try registering an ACPI video backlight device instead. */
5061 acpi_video_register_backlight();
5062 return;
5063 }
5064
5065 caps = &dm->backlight_caps[aconnector->bl_idx];
5066 if (get_brightness_range(caps, &min, &max)) {
5067 if (power_supply_is_system_supplied() > 0)
5068 props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
5069 else
5070 props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
5071 /* min is zero, so max needs to be adjusted */
5072 props.max_brightness = max - min;
5073 drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
5074 caps->ac_level, caps->dc_level);
5075 } else
5076 props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
5077
5078 if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
5079 drm_info(drm, "Using custom brightness curve\n");
5080 props.scale = BACKLIGHT_SCALE_NON_LINEAR;
5081 } else
5082 props.scale = BACKLIGHT_SCALE_LINEAR;
5083 props.type = BACKLIGHT_RAW;
5084
5085 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
5086 drm->primary->index + aconnector->bl_idx);
5087
5088 dm->backlight_dev[aconnector->bl_idx] =
5089 backlight_device_register(bl_name, aconnector->base.kdev, dm,
5090 &amdgpu_dm_backlight_ops, &props);
5091 dm->brightness[aconnector->bl_idx] = props.brightness;
5092
5093 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
5094 drm_err(drm, "DM: Backlight registration failed!\n");
5095 dm->backlight_dev[aconnector->bl_idx] = NULL;
5096 } else
5097 drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
5098 }
5099
5100 static int initialize_plane(struct amdgpu_display_manager *dm,
5101 struct amdgpu_mode_info *mode_info, int plane_id,
5102 enum drm_plane_type plane_type,
5103 const struct dc_plane_cap *plane_cap)
5104 {
5105 struct drm_plane *plane;
5106 unsigned long possible_crtcs;
5107 int ret = 0;
5108
5109 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
5110 if (!plane) {
5111 drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
5112 return -ENOMEM;
5113 }
5114 plane->type = plane_type;
5115
5116 /*
5117 * HACK: IGT tests expect that the primary plane for a CRTC
5118 * can only have one possible CRTC. Only expose support for
5119 * any CRTC if they're not going to be used as a primary plane
5120 * for a CRTC - like overlay or underlay planes.
5121 */
5122 possible_crtcs = 1 << plane_id;
5123 if (plane_id >= dm->dc->caps.max_streams)
5124 possible_crtcs = 0xff;
5125
5126 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
5127
5128 if (ret) {
5129 drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
5130 kfree(plane);
5131 return ret;
5132 }
5133
5134 if (mode_info)
5135 mode_info->planes[plane_id] = plane;
5136
5137 return ret;
5138 }
5139
5140
5141 static void setup_backlight_device(struct amdgpu_display_manager *dm,
5142 struct amdgpu_dm_connector *aconnector)
5143 {
5144 struct dc_link *link = aconnector->dc_link;
5145 int bl_idx = dm->num_of_edps;
5146
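	/* Backlight control is only set up for connected internal panels
	 * (eDP or LVDS).
	 */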
5147 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
5148 link->type == dc_connection_none)
5149 return;
5150
5151 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
5152 drm_warn(adev_to_drm(dm->adev), "Too much eDP connections, skipping backlight setup for additional eDPs\n");
5153 return;
5154 }
5155
5156 aconnector->bl_idx = bl_idx;
5157
5158 amdgpu_dm_update_backlight_caps(dm, bl_idx);
5159 dm->backlight_link[bl_idx] = link;
5160 dm->num_of_edps++;
5161
5162 update_connector_ext_caps(aconnector);
5163 }
5164
5165 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
5166
5167 /*
5168 * In this architecture, the association
5169 * connector -> encoder -> crtc
5170 * is not really required. The crtc and connector will hold the
5171 * display_index as an abstraction to use with the DAL component.
5172 *
5173 * Returns 0 on success
5174 */
5175 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
5176 {
5177 struct amdgpu_display_manager *dm = &adev->dm;
5178 s32 i;
5179 struct amdgpu_dm_connector *aconnector = NULL;
5180 struct amdgpu_encoder *aencoder = NULL;
5181 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5182 u32 link_cnt;
5183 s32 primary_planes;
5184 enum dc_connection_type new_connection_type = dc_connection_none;
5185 const struct dc_plane_cap *plane;
5186 bool psr_feature_enabled = false;
5187 bool replay_feature_enabled = false;
5188 int max_overlay = dm->dc->caps.max_slave_planes;
5189
5190 dm->display_indexes_num = dm->dc->caps.max_streams;
5191 /* Update the actual used number of crtc */
5192 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
5193
5194 amdgpu_dm_set_irq_funcs(adev);
5195
5196 link_cnt = dm->dc->caps.max_links;
5197 if (amdgpu_dm_mode_config_init(dm->adev)) {
5198 drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
5199 return -EINVAL;
5200 }
5201
5202 /* There is one primary plane per CRTC */
5203 primary_planes = dm->dc->caps.max_streams;
5204 if (primary_planes > AMDGPU_MAX_PLANES) {
5205 drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
5206 return -EINVAL;
5207 }
5208
5209 /*
5210 * Initialize primary planes, implicit planes for legacy IOCTLS.
5211 * Order is reversed to match iteration order in atomic check.
5212 */
5213 for (i = (primary_planes - 1); i >= 0; i--) {
5214 plane = &dm->dc->caps.planes[i];
5215
5216 if (initialize_plane(dm, mode_info, i,
5217 DRM_PLANE_TYPE_PRIMARY, plane)) {
5218 drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
5219 goto fail;
5220 }
5221 }
5222
5223 /*
5224 * Initialize overlay planes, index starting after primary planes.
5225 * These planes have a higher DRM index than the primary planes since
5226 * they should be considered as having a higher z-order.
5227 * Order is reversed to match iteration order in atomic check.
5228 *
5229 * Only support DCN for now, and only expose one so we don't encourage
5230 * userspace to use up all the pipes.
5231 */
5232 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
5233 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
5234
5235 /* Do not create overlay if MPO disabled */
5236 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
5237 break;
5238
5239 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
5240 continue;
5241
5242 if (!plane->pixel_format_support.argb8888)
5243 continue;
5244
5245 if (max_overlay-- == 0)
5246 break;
5247
5248 if (initialize_plane(dm, NULL, primary_planes + i,
5249 DRM_PLANE_TYPE_OVERLAY, plane)) {
5250 drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
5251 goto fail;
5252 }
5253 }
5254
5255 for (i = 0; i < dm->dc->caps.max_streams; i++)
5256 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
5257 drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
5258 goto fail;
5259 }
5260
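	/*
	 * Register the outbox IRQ handlers on DCN IP versions that route
	 * DMUB firmware notifications through the outbox.
	 */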
5261 /* Use Outbox interrupt */
5262 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5263 case IP_VERSION(3, 0, 0):
5264 case IP_VERSION(3, 1, 2):
5265 case IP_VERSION(3, 1, 3):
5266 case IP_VERSION(3, 1, 4):
5267 case IP_VERSION(3, 1, 5):
5268 case IP_VERSION(3, 1, 6):
5269 case IP_VERSION(3, 2, 0):
5270 case IP_VERSION(3, 2, 1):
5271 case IP_VERSION(2, 1, 0):
5272 case IP_VERSION(3, 5, 0):
5273 case IP_VERSION(3, 5, 1):
5274 case IP_VERSION(3, 6, 0):
5275 case IP_VERSION(4, 0, 1):
5276 if (register_outbox_irq_handlers(dm->adev)) {
5277 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
5278 goto fail;
5279 }
5280 break;
5281 default:
5282 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
5283 amdgpu_ip_version(adev, DCE_HWIP, 0));
5284 }
5285
5286 /* Determine whether to enable PSR support by default. */
5287 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
5288 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5289 case IP_VERSION(3, 1, 2):
5290 case IP_VERSION(3, 1, 3):
5291 case IP_VERSION(3, 1, 4):
5292 case IP_VERSION(3, 1, 5):
5293 case IP_VERSION(3, 1, 6):
5294 case IP_VERSION(3, 2, 0):
5295 case IP_VERSION(3, 2, 1):
5296 case IP_VERSION(3, 5, 0):
5297 case IP_VERSION(3, 5, 1):
5298 case IP_VERSION(3, 6, 0):
5299 case IP_VERSION(4, 0, 1):
5300 psr_feature_enabled = true;
5301 break;
5302 default:
5303 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
5304 break;
5305 }
5306 }
5307
5308 /* Determine whether to enable Replay support by default. */
5309 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
5310 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5311 case IP_VERSION(3, 1, 4):
5312 case IP_VERSION(3, 2, 0):
5313 case IP_VERSION(3, 2, 1):
5314 case IP_VERSION(3, 5, 0):
5315 case IP_VERSION(3, 5, 1):
5316 case IP_VERSION(3, 6, 0):
5317 replay_feature_enabled = true;
5318 break;
5319
5320 default:
5321 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
5322 break;
5323 }
5324 }
5325
5326 if (link_cnt > MAX_LINKS) {
5327 drm_err(adev_to_drm(adev),
5328 "KMS: Cannot support more than %d display indexes\n",
5329 MAX_LINKS);
5330 goto fail;
5331 }
5332
5333 /* loops over all connectors on the board */
5334 for (i = 0; i < link_cnt; i++) {
5335 struct dc_link *link = NULL;
5336
5337 link = dc_get_link_at_index(dm->dc, i);
5338
5339 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
5340 struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
5341
5342 if (!wbcon) {
5343 drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
5344 continue;
5345 }
5346
5347 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
5348 drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
5349 kfree(wbcon);
5350 continue;
5351 }
5352
5353 link->psr_settings.psr_feature_enabled = false;
5354 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
5355
5356 continue;
5357 }
5358
5359 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
5360 if (!aconnector)
5361 goto fail;
5362
5363 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
5364 if (!aencoder)
5365 goto fail;
5366
5367 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
5368 drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
5369 goto fail;
5370 }
5371
5372 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
5373 drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
5374 goto fail;
5375 }
5376
5377 if (dm->hpd_rx_offload_wq)
5378 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
5379 aconnector;
5380
5381 if (!dc_link_detect_connection_type(link, &new_connection_type))
5382 drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
5383
5384 if (aconnector->base.force && new_connection_type == dc_connection_none) {
5385 emulated_link_detect(link);
5386 amdgpu_dm_update_connector_after_detect(aconnector);
5387 } else {
5388 bool ret = false;
5389
5390 mutex_lock(&dm->dc_lock);
5391 dc_exit_ips_for_hw_access(dm->dc);
5392 ret = dc_link_detect(link, DETECT_REASON_BOOT);
5393 mutex_unlock(&dm->dc_lock);
5394
5395 if (ret) {
5396 amdgpu_dm_update_connector_after_detect(aconnector);
5397 setup_backlight_device(dm, aconnector);
5398
5399 /* Disable PSR if Replay can be enabled */
5400 if (replay_feature_enabled)
5401 if (amdgpu_dm_set_replay_caps(link, aconnector))
5402 psr_feature_enabled = false;
5403
5404 if (psr_feature_enabled) {
5405 amdgpu_dm_set_psr_caps(link);
5406 drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
5407 link->psr_settings.psr_feature_enabled,
5408 link->psr_settings.psr_version,
5409 link->dpcd_caps.psr_info.psr_version,
5410 link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
5411 link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
5412 }
5413 }
5414 }
5415 amdgpu_set_panel_orientation(&aconnector->base);
5416 }
5417
5418 /* Software is initialized. Now we can register interrupt handlers. */
5419 switch (adev->asic_type) {
5420 #if defined(CONFIG_DRM_AMD_DC_SI)
5421 case CHIP_TAHITI:
5422 case CHIP_PITCAIRN:
5423 case CHIP_VERDE:
5424 case CHIP_OLAND:
5425 if (dce60_register_irq_handlers(dm->adev)) {
5426 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
5427 goto fail;
5428 }
5429 break;
5430 #endif
5431 case CHIP_BONAIRE:
5432 case CHIP_HAWAII:
5433 case CHIP_KAVERI:
5434 case CHIP_KABINI:
5435 case CHIP_MULLINS:
5436 case CHIP_TONGA:
5437 case CHIP_FIJI:
5438 case CHIP_CARRIZO:
5439 case CHIP_STONEY:
5440 case CHIP_POLARIS11:
5441 case CHIP_POLARIS10:
5442 case CHIP_POLARIS12:
5443 case CHIP_VEGAM:
5444 case CHIP_VEGA10:
5445 case CHIP_VEGA12:
5446 case CHIP_VEGA20:
5447 if (dce110_register_irq_handlers(dm->adev)) {
5448 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
5449 goto fail;
5450 }
5451 break;
5452 default:
5453 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5454 case IP_VERSION(1, 0, 0):
5455 case IP_VERSION(1, 0, 1):
5456 case IP_VERSION(2, 0, 2):
5457 case IP_VERSION(2, 0, 3):
5458 case IP_VERSION(2, 0, 0):
5459 case IP_VERSION(2, 1, 0):
5460 case IP_VERSION(3, 0, 0):
5461 case IP_VERSION(3, 0, 2):
5462 case IP_VERSION(3, 0, 3):
5463 case IP_VERSION(3, 0, 1):
5464 case IP_VERSION(3, 1, 2):
5465 case IP_VERSION(3, 1, 3):
5466 case IP_VERSION(3, 1, 4):
5467 case IP_VERSION(3, 1, 5):
5468 case IP_VERSION(3, 1, 6):
5469 case IP_VERSION(3, 2, 0):
5470 case IP_VERSION(3, 2, 1):
5471 case IP_VERSION(3, 5, 0):
5472 case IP_VERSION(3, 5, 1):
5473 case IP_VERSION(3, 6, 0):
5474 case IP_VERSION(4, 0, 1):
5475 if (dcn10_register_irq_handlers(dm->adev)) {
5476 drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
5477 goto fail;
5478 }
5479 break;
5480 default:
5481 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
5482 amdgpu_ip_version(adev, DCE_HWIP, 0));
5483 goto fail;
5484 }
5485 break;
5486 }
5487
5488 return 0;
5489 fail:
5490 kfree(aencoder);
5491 kfree(aconnector);
5492
5493 return -EINVAL;
5494 }
5495
5496 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
5497 {
5498 if (dm->atomic_obj.state)
5499 drm_atomic_private_obj_fini(&dm->atomic_obj);
5500 }
5501
5502 /******************************************************************************
5503 * amdgpu_display_funcs functions
5504 *****************************************************************************/
5505
5506 /*
5507 * dm_bandwidth_update - program display watermarks
5508 *
5509 * @adev: amdgpu_device pointer
5510 *
5511 * Calculate and program the display watermarks and line buffer allocation.
5512 */
5513 static void dm_bandwidth_update(struct amdgpu_device *adev)
5514 {
5515 /* TODO: implement later */
5516 }
5517
5518 static const struct amdgpu_display_funcs dm_display_funcs = {
5519 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
5520 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
5521 .backlight_set_level = NULL, /* never called for DC */
5522 .backlight_get_level = NULL, /* never called for DC */
5523 .hpd_sense = NULL,/* called unconditionally */
5524 .hpd_set_polarity = NULL, /* called unconditionally */
5525 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
5526 .page_flip_get_scanoutpos =
5527 dm_crtc_get_scanoutpos,/* called unconditionally */
5528 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
5529 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
5530 };
5531
5532 #if defined(CONFIG_DEBUG_KERNEL_DC)
5533
5534 static ssize_t s3_debug_store(struct device *device,
5535 struct device_attribute *attr,
5536 const char *buf,
5537 size_t count)
5538 {
5539 int ret;
5540 int s3_state;
5541 struct drm_device *drm_dev = dev_get_drvdata(device);
5542 struct amdgpu_device *adev = drm_to_adev(drm_dev);
5543 struct amdgpu_ip_block *ip_block;
5544
5545 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
5546 if (!ip_block)
5547 return -EINVAL;
5548
5549 ret = kstrtoint(buf, 0, &s3_state);
5550
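	/* A non-zero value simulates a resume (followed by a hotplug event),
	 * zero simulates a suspend.
	 */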
5551 if (ret == 0) {
5552 if (s3_state) {
5553 dm_resume(ip_block);
5554 drm_kms_helper_hotplug_event(adev_to_drm(adev));
5555 } else
5556 dm_suspend(ip_block);
5557 }
5558
5559 return ret == 0 ? count : 0;
5560 }
5561
5562 DEVICE_ATTR_WO(s3_debug);
5563
5564 #endif
5565
5566 static int dm_init_microcode(struct amdgpu_device *adev)
5567 {
5568 char *fw_name_dmub;
5569 int r;
5570
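	/* Select the DMUB firmware binary that matches the DCN IP version;
	 * ASICs without a DMUB fall through to the default case and skip the
	 * firmware request.
	 */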
5571 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5572 case IP_VERSION(2, 1, 0):
5573 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
5574 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
5575 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
5576 break;
5577 case IP_VERSION(3, 0, 0):
5578 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
5579 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
5580 else
5581 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
5582 break;
5583 case IP_VERSION(3, 0, 1):
5584 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
5585 break;
5586 case IP_VERSION(3, 0, 2):
5587 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
5588 break;
5589 case IP_VERSION(3, 0, 3):
5590 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
5591 break;
5592 case IP_VERSION(3, 1, 2):
5593 case IP_VERSION(3, 1, 3):
5594 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
5595 break;
5596 case IP_VERSION(3, 1, 4):
5597 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
5598 break;
5599 case IP_VERSION(3, 1, 5):
5600 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
5601 break;
5602 case IP_VERSION(3, 1, 6):
5603 fw_name_dmub = FIRMWARE_DCN316_DMUB;
5604 break;
5605 case IP_VERSION(3, 2, 0):
5606 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
5607 break;
5608 case IP_VERSION(3, 2, 1):
5609 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
5610 break;
5611 case IP_VERSION(3, 5, 0):
5612 fw_name_dmub = FIRMWARE_DCN_35_DMUB;
5613 break;
5614 case IP_VERSION(3, 5, 1):
5615 fw_name_dmub = FIRMWARE_DCN_351_DMUB;
5616 break;
5617 case IP_VERSION(3, 6, 0):
5618 fw_name_dmub = FIRMWARE_DCN_36_DMUB;
5619 break;
5620 case IP_VERSION(4, 0, 1):
5621 fw_name_dmub = FIRMWARE_DCN_401_DMUB;
5622 break;
5623 default:
5624 /* ASIC doesn't support DMUB. */
5625 return 0;
5626 }
5627 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
5628 "%s", fw_name_dmub);
5629 return r;
5630 }
5631
5632 static int dm_early_init(struct amdgpu_ip_block *ip_block)
5633 {
5634 struct amdgpu_device *adev = ip_block->adev;
5635 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5636 struct atom_context *ctx = mode_info->atom_context;
5637 int index = GetIndexIntoMasterTable(DATA, Object_Header);
5638 u16 data_offset;
5639
5640 /* if there is no object header, skip DM */
5641 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
5642 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
5643 drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
5644 return -ENOENT;
5645 }
5646
5647 switch (adev->asic_type) {
5648 #if defined(CONFIG_DRM_AMD_DC_SI)
5649 case CHIP_TAHITI:
5650 case CHIP_PITCAIRN:
5651 case CHIP_VERDE:
5652 adev->mode_info.num_crtc = 6;
5653 adev->mode_info.num_hpd = 6;
5654 adev->mode_info.num_dig = 6;
5655 break;
5656 case CHIP_OLAND:
5657 adev->mode_info.num_crtc = 2;
5658 adev->mode_info.num_hpd = 2;
5659 adev->mode_info.num_dig = 2;
5660 break;
5661 #endif
5662 case CHIP_BONAIRE:
5663 case CHIP_HAWAII:
5664 adev->mode_info.num_crtc = 6;
5665 adev->mode_info.num_hpd = 6;
5666 adev->mode_info.num_dig = 6;
5667 break;
5668 case CHIP_KAVERI:
5669 adev->mode_info.num_crtc = 4;
5670 adev->mode_info.num_hpd = 6;
5671 adev->mode_info.num_dig = 7;
5672 break;
5673 case CHIP_KABINI:
5674 case CHIP_MULLINS:
5675 adev->mode_info.num_crtc = 2;
5676 adev->mode_info.num_hpd = 6;
5677 adev->mode_info.num_dig = 6;
5678 break;
5679 case CHIP_FIJI:
5680 case CHIP_TONGA:
5681 adev->mode_info.num_crtc = 6;
5682 adev->mode_info.num_hpd = 6;
5683 adev->mode_info.num_dig = 7;
5684 break;
5685 case CHIP_CARRIZO:
5686 adev->mode_info.num_crtc = 3;
5687 adev->mode_info.num_hpd = 6;
5688 adev->mode_info.num_dig = 9;
5689 break;
5690 case CHIP_STONEY:
5691 adev->mode_info.num_crtc = 2;
5692 adev->mode_info.num_hpd = 6;
5693 adev->mode_info.num_dig = 9;
5694 break;
5695 case CHIP_POLARIS11:
5696 case CHIP_POLARIS12:
5697 adev->mode_info.num_crtc = 5;
5698 adev->mode_info.num_hpd = 5;
5699 adev->mode_info.num_dig = 5;
5700 break;
5701 case CHIP_POLARIS10:
5702 case CHIP_VEGAM:
5703 adev->mode_info.num_crtc = 6;
5704 adev->mode_info.num_hpd = 6;
5705 adev->mode_info.num_dig = 6;
5706 break;
5707 case CHIP_VEGA10:
5708 case CHIP_VEGA12:
5709 case CHIP_VEGA20:
5710 adev->mode_info.num_crtc = 6;
5711 adev->mode_info.num_hpd = 6;
5712 adev->mode_info.num_dig = 6;
5713 break;
5714 default:
5715
5716 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
5717 case IP_VERSION(2, 0, 2):
5718 case IP_VERSION(3, 0, 0):
5719 adev->mode_info.num_crtc = 6;
5720 adev->mode_info.num_hpd = 6;
5721 adev->mode_info.num_dig = 6;
5722 break;
5723 case IP_VERSION(2, 0, 0):
5724 case IP_VERSION(3, 0, 2):
5725 adev->mode_info.num_crtc = 5;
5726 adev->mode_info.num_hpd = 5;
5727 adev->mode_info.num_dig = 5;
5728 break;
5729 case IP_VERSION(2, 0, 3):
5730 case IP_VERSION(3, 0, 3):
5731 adev->mode_info.num_crtc = 2;
5732 adev->mode_info.num_hpd = 2;
5733 adev->mode_info.num_dig = 2;
5734 break;
5735 case IP_VERSION(1, 0, 0):
5736 case IP_VERSION(1, 0, 1):
5737 case IP_VERSION(3, 0, 1):
5738 case IP_VERSION(2, 1, 0):
5739 case IP_VERSION(3, 1, 2):
5740 case IP_VERSION(3, 1, 3):
5741 case IP_VERSION(3, 1, 4):
5742 case IP_VERSION(3, 1, 5):
5743 case IP_VERSION(3, 1, 6):
5744 case IP_VERSION(3, 2, 0):
5745 case IP_VERSION(3, 2, 1):
5746 case IP_VERSION(3, 5, 0):
5747 case IP_VERSION(3, 5, 1):
5748 case IP_VERSION(3, 6, 0):
5749 case IP_VERSION(4, 0, 1):
5750 adev->mode_info.num_crtc = 4;
5751 adev->mode_info.num_hpd = 4;
5752 adev->mode_info.num_dig = 4;
5753 break;
5754 default:
5755 drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
5756 amdgpu_ip_version(adev, DCE_HWIP, 0));
5757 return -EINVAL;
5758 }
5759 break;
5760 }
5761
5762 if (adev->mode_info.funcs == NULL)
5763 adev->mode_info.funcs = &dm_display_funcs;
5764
5765 /*
5766 * Note: Do NOT change adev->audio_endpt_rreg and
5767 * adev->audio_endpt_wreg because they are initialised in
5768 * amdgpu_device_init()
5769 */
5770 #if defined(CONFIG_DEBUG_KERNEL_DC)
5771 device_create_file(
5772 adev_to_drm(adev)->dev,
5773 &dev_attr_s3_debug);
5774 #endif
5775 adev->dc_enabled = true;
5776
5777 return dm_init_microcode(adev);
5778 }
5779
5780 static bool modereset_required(struct drm_crtc_state *crtc_state)
5781 {
5782 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
5783 }
5784
5785 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
5786 {
5787 drm_encoder_cleanup(encoder);
5788 kfree(encoder);
5789 }
5790
5791 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
5792 .destroy = amdgpu_dm_encoder_destroy,
5793 };
5794
5795 static int
5796 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5797 const enum surface_pixel_format format,
5798 enum dc_color_space *color_space)
5799 {
5800 bool full_range;
5801
5802 *color_space = COLOR_SPACE_SRGB;
5803
5804 /* DRM color properties only affect non-RGB formats. */
5805 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5806 return 0;
5807
5808 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5809
5810 switch (plane_state->color_encoding) {
5811 case DRM_COLOR_YCBCR_BT601:
5812 if (full_range)
5813 *color_space = COLOR_SPACE_YCBCR601;
5814 else
5815 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5816 break;
5817
5818 case DRM_COLOR_YCBCR_BT709:
5819 if (full_range)
5820 *color_space = COLOR_SPACE_YCBCR709;
5821 else
5822 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5823 break;
5824
5825 case DRM_COLOR_YCBCR_BT2020:
5826 if (full_range)
5827 *color_space = COLOR_SPACE_2020_YCBCR_FULL;
5828 else
5829 *color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
5830 break;
5831
5832 default:
5833 return -EINVAL;
5834 }
5835
5836 return 0;
5837 }
5838
5839 static int
5840 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5841 const struct drm_plane_state *plane_state,
5842 const u64 tiling_flags,
5843 struct dc_plane_info *plane_info,
5844 struct dc_plane_address *address,
5845 bool tmz_surface)
5846 {
5847 const struct drm_framebuffer *fb = plane_state->fb;
5848 const struct amdgpu_framebuffer *afb =
5849 to_amdgpu_framebuffer(plane_state->fb);
5850 int ret;
5851
5852 memset(plane_info, 0, sizeof(*plane_info));
5853
5854 switch (fb->format->format) {
5855 case DRM_FORMAT_C8:
5856 plane_info->format =
5857 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5858 break;
5859 case DRM_FORMAT_RGB565:
5860 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5861 break;
5862 case DRM_FORMAT_XRGB8888:
5863 case DRM_FORMAT_ARGB8888:
5864 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5865 break;
5866 case DRM_FORMAT_XRGB2101010:
5867 case DRM_FORMAT_ARGB2101010:
5868 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5869 break;
5870 case DRM_FORMAT_XBGR2101010:
5871 case DRM_FORMAT_ABGR2101010:
5872 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5873 break;
5874 case DRM_FORMAT_XBGR8888:
5875 case DRM_FORMAT_ABGR8888:
5876 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5877 break;
5878 case DRM_FORMAT_NV21:
5879 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5880 break;
5881 case DRM_FORMAT_NV12:
5882 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5883 break;
5884 case DRM_FORMAT_P010:
5885 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5886 break;
5887 case DRM_FORMAT_XRGB16161616F:
5888 case DRM_FORMAT_ARGB16161616F:
5889 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5890 break;
5891 case DRM_FORMAT_XBGR16161616F:
5892 case DRM_FORMAT_ABGR16161616F:
5893 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5894 break;
5895 case DRM_FORMAT_XRGB16161616:
5896 case DRM_FORMAT_ARGB16161616:
5897 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5898 break;
5899 case DRM_FORMAT_XBGR16161616:
5900 case DRM_FORMAT_ABGR16161616:
5901 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5902 break;
5903 default:
5904 drm_err(adev_to_drm(adev),
5905 "Unsupported screen format %p4cc\n",
5906 &fb->format->format);
5907 return -EINVAL;
5908 }
5909
5910 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5911 case DRM_MODE_ROTATE_0:
5912 plane_info->rotation = ROTATION_ANGLE_0;
5913 break;
5914 case DRM_MODE_ROTATE_90:
5915 plane_info->rotation = ROTATION_ANGLE_90;
5916 break;
5917 case DRM_MODE_ROTATE_180:
5918 plane_info->rotation = ROTATION_ANGLE_180;
5919 break;
5920 case DRM_MODE_ROTATE_270:
5921 plane_info->rotation = ROTATION_ANGLE_270;
5922 break;
5923 default:
5924 plane_info->rotation = ROTATION_ANGLE_0;
5925 break;
5926 }
5927
5928
5929 plane_info->visible = true;
5930 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5931
5932 plane_info->layer_index = plane_state->normalized_zpos;
5933
5934 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5935 &plane_info->color_space);
5936 if (ret)
5937 return ret;
5938
5939 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5940 plane_info->rotation, tiling_flags,
5941 &plane_info->tiling_info,
5942 &plane_info->plane_size,
5943 &plane_info->dcc, address,
5944 tmz_surface);
5945 if (ret)
5946 return ret;
5947
5948 amdgpu_dm_plane_fill_blending_from_plane_state(
5949 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5950 &plane_info->global_alpha, &plane_info->global_alpha_value);
5951
5952 return 0;
5953 }
5954
5955 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5956 struct dc_plane_state *dc_plane_state,
5957 struct drm_plane_state *plane_state,
5958 struct drm_crtc_state *crtc_state)
5959 {
5960 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5961 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5962 struct dc_scaling_info scaling_info;
5963 struct dc_plane_info plane_info;
5964 int ret;
5965
5966 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5967 if (ret)
5968 return ret;
5969
5970 dc_plane_state->src_rect = scaling_info.src_rect;
5971 dc_plane_state->dst_rect = scaling_info.dst_rect;
5972 dc_plane_state->clip_rect = scaling_info.clip_rect;
5973 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5974
5975 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5976 afb->tiling_flags,
5977 &plane_info,
5978 &dc_plane_state->address,
5979 afb->tmz_surface);
5980 if (ret)
5981 return ret;
5982
5983 dc_plane_state->format = plane_info.format;
5984 dc_plane_state->color_space = plane_info.color_space;
5986 dc_plane_state->plane_size = plane_info.plane_size;
5987 dc_plane_state->rotation = plane_info.rotation;
5988 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5989 dc_plane_state->stereo_format = plane_info.stereo_format;
5990 dc_plane_state->tiling_info = plane_info.tiling_info;
5991 dc_plane_state->visible = plane_info.visible;
5992 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5993 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5994 dc_plane_state->global_alpha = plane_info.global_alpha;
5995 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5996 dc_plane_state->dcc = plane_info.dcc;
5997 dc_plane_state->layer_index = plane_info.layer_index;
5998 dc_plane_state->flip_int_enabled = true;
5999
6000 /*
6001 * Always set input transfer function, since plane state is refreshed
6002 * every time.
6003 */
6004 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
6005 plane_state,
6006 dc_plane_state);
6007 if (ret)
6008 return ret;
6009
6010 return 0;
6011 }
6012
6013 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
6014 struct rect *dirty_rect, int32_t x,
6015 s32 y, s32 width, s32 height,
6016 int *i, bool ffu)
6017 {
6018 WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
6019
6020 dirty_rect->x = x;
6021 dirty_rect->y = y;
6022 dirty_rect->width = width;
6023 dirty_rect->height = height;
6024
6025 if (ffu)
6026 drm_dbg(plane->dev,
6027 "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
6028 plane->base.id, width, height);
6029 else
6030 drm_dbg(plane->dev,
6031 "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
6032 plane->base.id, x, y, width, height);
6033
6034 (*i)++;
6035 }
6036
6037 /**
6038 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
6039 *
6040 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
6041 * remote fb
6042 * @old_plane_state: Old state of @plane
6043 * @new_plane_state: New state of @plane
6044 * @crtc_state: New state of CRTC connected to the @plane
6045 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
6046 * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
6047 * If PSR SU is enabled and damage clips are available, only the regions of the screen
6048 * that have changed will be updated. If PSR SU is not enabled,
6049 * or if damage clips are not available, the entire screen will be updated.
6050 * @dirty_regions_changed: dirty regions changed
6051 *
6052 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
6053 * (referred to as "damage clips" in DRM nomenclature) that require updating on
6054 * the eDP remote buffer. The responsibility of specifying the dirty regions is
6055 * amdgpu_dm's.
6056 *
6057 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
6058 * plane with regions that require flushing to the eDP remote buffer. In
6059 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
6060 * implicitly provide damage clips without any client support via the plane
6061 * bounds.
6062 */
6063 static void fill_dc_dirty_rects(struct drm_plane *plane,
6064 struct drm_plane_state *old_plane_state,
6065 struct drm_plane_state *new_plane_state,
6066 struct drm_crtc_state *crtc_state,
6067 struct dc_flip_addrs *flip_addrs,
6068 bool is_psr_su,
6069 bool *dirty_regions_changed)
6070 {
6071 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6072 struct rect *dirty_rects = flip_addrs->dirty_rects;
6073 u32 num_clips;
6074 struct drm_mode_rect *clips;
6075 bool bb_changed;
6076 bool fb_changed;
6077 u32 i = 0;
6078 *dirty_regions_changed = false;
6079
6080 /*
6081 	 * Cursor plane has its own dirty rect update interface. See
6082 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
6083 */
6084 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6085 return;
6086
6087 if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
6088 goto ffu;
6089
6090 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
6091 clips = drm_plane_get_damage_clips(new_plane_state);
6092
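	/*
	 * Ignore client damage clips when they are disabled via the
	 * amdgpu_damage_clips parameter, or when it is left in auto mode
	 * while PSR-SU is active, and fall back to a full-frame update.
	 */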
6093 if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
6094 is_psr_su)))
6095 goto ffu;
6096
6097 if (!dm_crtc_state->mpo_requested) {
6098 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
6099 goto ffu;
6100
6101 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
6102 fill_dc_dirty_rect(new_plane_state->plane,
6103 &dirty_rects[flip_addrs->dirty_rect_count],
6104 clips->x1, clips->y1,
6105 clips->x2 - clips->x1, clips->y2 - clips->y1,
6106 &flip_addrs->dirty_rect_count,
6107 false);
6108 return;
6109 }
6110
6111 /*
6112 * MPO is requested. Add entire plane bounding box to dirty rects if
6113 * flipped to or damaged.
6114 *
6115 * If plane is moved or resized, also add old bounding box to dirty
6116 * rects.
6117 */
6118 fb_changed = old_plane_state->fb->base.id !=
6119 new_plane_state->fb->base.id;
6120 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
6121 old_plane_state->crtc_y != new_plane_state->crtc_y ||
6122 old_plane_state->crtc_w != new_plane_state->crtc_w ||
6123 old_plane_state->crtc_h != new_plane_state->crtc_h);
6124
6125 drm_dbg(plane->dev,
6126 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
6127 new_plane_state->plane->base.id,
6128 bb_changed, fb_changed, num_clips);
6129
6130 *dirty_regions_changed = bb_changed;
6131
6132 if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
6133 goto ffu;
6134
6135 if (bb_changed) {
6136 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
6137 new_plane_state->crtc_x,
6138 new_plane_state->crtc_y,
6139 new_plane_state->crtc_w,
6140 new_plane_state->crtc_h, &i, false);
6141
6142 /* Add old plane bounding-box if plane is moved or resized */
6143 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
6144 old_plane_state->crtc_x,
6145 old_plane_state->crtc_y,
6146 old_plane_state->crtc_w,
6147 old_plane_state->crtc_h, &i, false);
6148 }
6149
6150 if (num_clips) {
6151 for (; i < num_clips; clips++)
6152 fill_dc_dirty_rect(new_plane_state->plane,
6153 &dirty_rects[i], clips->x1,
6154 clips->y1, clips->x2 - clips->x1,
6155 clips->y2 - clips->y1, &i, false);
6156 } else if (fb_changed && !bb_changed) {
6157 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
6158 new_plane_state->crtc_x,
6159 new_plane_state->crtc_y,
6160 new_plane_state->crtc_w,
6161 new_plane_state->crtc_h, &i, false);
6162 }
6163
6164 flip_addrs->dirty_rect_count = i;
6165 return;
6166
6167 ffu:
6168 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
6169 dm_crtc_state->base.mode.crtc_hdisplay,
6170 dm_crtc_state->base.mode.crtc_vdisplay,
6171 &flip_addrs->dirty_rect_count, true);
6172 }
6173
6174 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
6175 const struct dm_connector_state *dm_state,
6176 struct dc_stream_state *stream)
6177 {
6178 enum amdgpu_rmx_type rmx_type;
6179
6180 struct rect src = { 0 }; /* viewport in composition space*/
6181 struct rect dst = { 0 }; /* stream addressable area */
6182
6183 /* no mode. nothing to be done */
6184 if (!mode)
6185 return;
6186
6187 /* Full screen scaling by default */
6188 src.width = mode->hdisplay;
6189 src.height = mode->vdisplay;
6190 dst.width = stream->timing.h_addressable;
6191 dst.height = stream->timing.v_addressable;
6192
6193 if (dm_state) {
6194 rmx_type = dm_state->scaling;
6195 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
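			/* Preserve the source aspect ratio: shrink whichever
			 * destination dimension would otherwise be over-scaled.
			 */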
6196 if (src.width * dst.height <
6197 src.height * dst.width) {
6198 /* height needs less upscaling/more downscaling */
6199 dst.width = src.width *
6200 dst.height / src.height;
6201 } else {
6202 /* width needs less upscaling/more downscaling */
6203 dst.height = src.height *
6204 dst.width / src.width;
6205 }
6206 } else if (rmx_type == RMX_CENTER) {
6207 dst = src;
6208 }
6209
6210 dst.x = (stream->timing.h_addressable - dst.width) / 2;
6211 dst.y = (stream->timing.v_addressable - dst.height) / 2;
6212
6213 if (dm_state->underscan_enable) {
6214 dst.x += dm_state->underscan_hborder / 2;
6215 dst.y += dm_state->underscan_vborder / 2;
6216 dst.width -= dm_state->underscan_hborder;
6217 dst.height -= dm_state->underscan_vborder;
6218 }
6219 }
6220
6221 stream->src = src;
6222 stream->dst = dst;
6223
6224 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
6225 dst.x, dst.y, dst.width, dst.height);
6226
6227 }
6228
6229 static enum dc_color_depth
6230 convert_color_depth_from_display_info(const struct drm_connector *connector,
6231 bool is_y420, int requested_bpc)
6232 {
6233 u8 bpc;
6234
6235 if (is_y420) {
6236 bpc = 8;
6237
6238 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
6239 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
6240 bpc = 16;
6241 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
6242 bpc = 12;
6243 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
6244 bpc = 10;
6245 } else {
6246 bpc = (uint8_t)connector->display_info.bpc;
6247 /* Assume 8 bpc by default if no bpc is specified. */
6248 bpc = bpc ? bpc : 8;
6249 }
6250
6251 if (requested_bpc > 0) {
6252 /*
6253 * Cap display bpc based on the user requested value.
6254 *
6255 		 * The value for state->max_bpc may not be correctly updated
6256 * depending on when the connector gets added to the state
6257 * or if this was called outside of atomic check, so it
6258 * can't be used directly.
6259 */
6260 bpc = min_t(u8, bpc, requested_bpc);
6261
6262 /* Round down to the nearest even number. */
6263 bpc = bpc - (bpc & 1);
6264 }
6265
6266 switch (bpc) {
6267 case 0:
6268 /*
6269 		 * Temporary workaround: DRM doesn't parse color depth for
6270 * EDID revision before 1.4
6271 * TODO: Fix edid parsing
6272 */
6273 return COLOR_DEPTH_888;
6274 case 6:
6275 return COLOR_DEPTH_666;
6276 case 8:
6277 return COLOR_DEPTH_888;
6278 case 10:
6279 return COLOR_DEPTH_101010;
6280 case 12:
6281 return COLOR_DEPTH_121212;
6282 case 14:
6283 return COLOR_DEPTH_141414;
6284 case 16:
6285 return COLOR_DEPTH_161616;
6286 default:
6287 return COLOR_DEPTH_UNDEFINED;
6288 }
6289 }
6290
6291 static enum dc_aspect_ratio
6292 get_aspect_ratio(const struct drm_display_mode *mode_in)
6293 {
6294 /* 1-1 mapping, since both enums follow the HDMI spec. */
6295 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6296 }
6297
6298 static enum dc_color_space
6299 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
6300 const struct drm_connector_state *connector_state)
6301 {
6302 enum dc_color_space color_space = COLOR_SPACE_SRGB;
6303
6304 switch (connector_state->colorspace) {
6305 case DRM_MODE_COLORIMETRY_BT601_YCC:
6306 if (dc_crtc_timing->flags.Y_ONLY)
6307 color_space = COLOR_SPACE_YCBCR601_LIMITED;
6308 else
6309 color_space = COLOR_SPACE_YCBCR601;
6310 break;
6311 case DRM_MODE_COLORIMETRY_BT709_YCC:
6312 if (dc_crtc_timing->flags.Y_ONLY)
6313 color_space = COLOR_SPACE_YCBCR709_LIMITED;
6314 else
6315 color_space = COLOR_SPACE_YCBCR709;
6316 break;
6317 case DRM_MODE_COLORIMETRY_OPRGB:
6318 color_space = COLOR_SPACE_ADOBERGB;
6319 break;
6320 case DRM_MODE_COLORIMETRY_BT2020_RGB:
6321 case DRM_MODE_COLORIMETRY_BT2020_YCC:
6322 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
6323 color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
6324 else
6325 color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
6326 break;
6327 case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
6328 default:
6329 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
6330 color_space = COLOR_SPACE_SRGB;
6331 if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED)
6332 color_space = COLOR_SPACE_SRGB_LIMITED;
6333 /*
6334 			 * According to the HDMI spec, 27030 kHz is the separation
6335 			 * point between HDTV and SDTV, so use YCbCr709 and YCbCr601
6336 			 * respectively.
6337 */
6338 } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
6339 if (dc_crtc_timing->flags.Y_ONLY)
6340 color_space =
6341 COLOR_SPACE_YCBCR709_LIMITED;
6342 else
6343 color_space = COLOR_SPACE_YCBCR709;
6344 } else {
6345 if (dc_crtc_timing->flags.Y_ONLY)
6346 color_space =
6347 COLOR_SPACE_YCBCR601_LIMITED;
6348 else
6349 color_space = COLOR_SPACE_YCBCR601;
6350 }
6351 break;
6352 }
6353
6354 return color_space;
6355 }
6356
6357 static enum display_content_type
6358 get_output_content_type(const struct drm_connector_state *connector_state)
6359 {
6360 switch (connector_state->content_type) {
6361 default:
6362 case DRM_MODE_CONTENT_TYPE_NO_DATA:
6363 return DISPLAY_CONTENT_TYPE_NO_DATA;
6364 case DRM_MODE_CONTENT_TYPE_GRAPHICS:
6365 return DISPLAY_CONTENT_TYPE_GRAPHICS;
6366 case DRM_MODE_CONTENT_TYPE_PHOTO:
6367 return DISPLAY_CONTENT_TYPE_PHOTO;
6368 case DRM_MODE_CONTENT_TYPE_CINEMA:
6369 return DISPLAY_CONTENT_TYPE_CINEMA;
6370 case DRM_MODE_CONTENT_TYPE_GAME:
6371 return DISPLAY_CONTENT_TYPE_GAME;
6372 }
6373 }
6374
6375 static bool adjust_colour_depth_from_display_info(
6376 struct dc_crtc_timing *timing_out,
6377 const struct drm_display_info *info)
6378 {
6379 enum dc_color_depth depth = timing_out->display_color_depth;
6380 int normalized_clk;
6381
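	/*
	 * Scale the pixel clock by the colour depth (relative to 24 bpp) and
	 * step the depth down until the result fits within the sink's
	 * maximum TMDS clock.
	 */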
6382 do {
6383 normalized_clk = timing_out->pix_clk_100hz / 10;
6384 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6385 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6386 normalized_clk /= 2;
6387 /* Adjusting pix clock following on HDMI spec based on colour depth */
6388 switch (depth) {
6389 case COLOR_DEPTH_888:
6390 break;
6391 case COLOR_DEPTH_101010:
6392 normalized_clk = (normalized_clk * 30) / 24;
6393 break;
6394 case COLOR_DEPTH_121212:
6395 normalized_clk = (normalized_clk * 36) / 24;
6396 break;
6397 case COLOR_DEPTH_161616:
6398 normalized_clk = (normalized_clk * 48) / 24;
6399 break;
6400 default:
6401 /* The above depths are the only ones valid for HDMI. */
6402 return false;
6403 }
6404 if (normalized_clk <= info->max_tmds_clock) {
6405 timing_out->display_color_depth = depth;
6406 return true;
6407 }
6408 } while (--depth > COLOR_DEPTH_666);
6409 return false;
6410 }
6411
6412 static void fill_stream_properties_from_drm_display_mode(
6413 struct dc_stream_state *stream,
6414 const struct drm_display_mode *mode_in,
6415 const struct drm_connector *connector,
6416 const struct drm_connector_state *connector_state,
6417 const struct dc_stream_state *old_stream,
6418 int requested_bpc)
6419 {
6420 struct dc_crtc_timing *timing_out = &stream->timing;
6421 const struct drm_display_info *info = &connector->display_info;
6422 struct amdgpu_dm_connector *aconnector = NULL;
6423 struct hdmi_vendor_infoframe hv_frame;
6424 struct hdmi_avi_infoframe avi_frame;
6425 ssize_t err;
6426
6427 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
6428 aconnector = to_amdgpu_dm_connector(connector);
6429
6430 memset(&hv_frame, 0, sizeof(hv_frame));
6431 memset(&avi_frame, 0, sizeof(avi_frame));
6432
6433 timing_out->h_border_left = 0;
6434 timing_out->h_border_right = 0;
6435 timing_out->v_border_top = 0;
6436 timing_out->v_border_bottom = 0;
6437 /* TODO: un-hardcode */
6438 if (drm_mode_is_420_only(info, mode_in)
6439 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6440 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6441 else if (drm_mode_is_420_also(info, mode_in)
6442 && aconnector
6443 && aconnector->force_yuv420_output)
6444 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6445 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
6446 && aconnector
6447 && aconnector->force_yuv422_output)
6448 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
6449 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6450 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6451 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6452 else
6453 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6454
6455 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6456 timing_out->display_color_depth = convert_color_depth_from_display_info(
6457 connector,
6458 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6459 requested_bpc);
6460 timing_out->scan_type = SCANNING_TYPE_NODATA;
6461 timing_out->hdmi_vic = 0;
6462
6463 if (old_stream) {
6464 timing_out->vic = old_stream->timing.vic;
6465 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6466 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6467 } else {
6468 timing_out->vic = drm_match_cea_mode(mode_in);
6469 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6470 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6471 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6472 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6473 }
6474
6475 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6476 err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
6477 (struct drm_connector *)connector,
6478 mode_in);
6479 if (err < 0)
6480 drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
6481 connector->name, err);
6482 timing_out->vic = avi_frame.video_code;
6483 err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
6484 (struct drm_connector *)connector,
6485 mode_in);
6486 if (err < 0)
6487 drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
6488 connector->name, err);
6489 timing_out->hdmi_vic = hv_frame.vic;
6490 }
6491
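	/* FreeSync video modes are driver-inserted, so use the unadjusted
	 * base timings for them; all other modes use the crtc_* timings.
	 */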
6492 if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
6493 timing_out->h_addressable = mode_in->hdisplay;
6494 timing_out->h_total = mode_in->htotal;
6495 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6496 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6497 timing_out->v_total = mode_in->vtotal;
6498 timing_out->v_addressable = mode_in->vdisplay;
6499 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6500 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6501 timing_out->pix_clk_100hz = mode_in->clock * 10;
6502 } else {
6503 timing_out->h_addressable = mode_in->crtc_hdisplay;
6504 timing_out->h_total = mode_in->crtc_htotal;
6505 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6506 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6507 timing_out->v_total = mode_in->crtc_vtotal;
6508 timing_out->v_addressable = mode_in->crtc_vdisplay;
6509 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6510 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6511 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6512 }
6513
6514 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6515
6516 stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
6517 stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
6518 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6519 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6520 drm_mode_is_420_also(info, mode_in) &&
6521 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6522 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6523 adjust_colour_depth_from_display_info(timing_out, info);
6524 }
6525 }
6526
6527 stream->output_color_space = get_output_color_space(timing_out, connector_state);
6528 stream->content_type = get_output_content_type(connector_state);
6529 }
6530
6531 static void fill_audio_info(struct audio_info *audio_info,
6532 const struct drm_connector *drm_connector,
6533 const struct dc_sink *dc_sink)
6534 {
6535 int i = 0;
6536 int cea_revision = 0;
6537 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6538
6539 audio_info->manufacture_id = edid_caps->manufacturer_id;
6540 audio_info->product_id = edid_caps->product_id;
6541
6542 cea_revision = drm_connector->display_info.cea_rev;
6543
6544 strscpy(audio_info->display_name,
6545 edid_caps->display_name,
6546 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6547
6548 if (cea_revision >= 3) {
6549 audio_info->mode_count = edid_caps->audio_mode_count;
6550
6551 for (i = 0; i < audio_info->mode_count; ++i) {
6552 audio_info->modes[i].format_code =
6553 (enum audio_format_code)
6554 (edid_caps->audio_modes[i].format_code);
6555 audio_info->modes[i].channel_count =
6556 edid_caps->audio_modes[i].channel_count;
6557 audio_info->modes[i].sample_rates.all =
6558 edid_caps->audio_modes[i].sample_rate;
6559 audio_info->modes[i].sample_size =
6560 edid_caps->audio_modes[i].sample_size;
6561 }
6562 }
6563
6564 audio_info->flags.all = edid_caps->speaker_flags;
6565
6566 /* TODO: We only check for the progressive mode, check for interlace mode too */
6567 if (drm_connector->latency_present[0]) {
6568 audio_info->video_latency = drm_connector->video_latency[0];
6569 audio_info->audio_latency = drm_connector->audio_latency[0];
6570 }
6571
6572 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6573
6574 }
6575
6576 static void
6577 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6578 struct drm_display_mode *dst_mode)
6579 {
6580 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6581 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6582 dst_mode->crtc_clock = src_mode->crtc_clock;
6583 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6584 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6585 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6586 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6587 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6588 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6589 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6590 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6591 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6592 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6593 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6594 }
6595
6596 static void
6597 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6598 const struct drm_display_mode *native_mode,
6599 bool scale_enabled)
6600 {
6601 if (scale_enabled || (
6602 native_mode->clock == drm_mode->clock &&
6603 native_mode->htotal == drm_mode->htotal &&
6604 native_mode->vtotal == drm_mode->vtotal)) {
6605 if (native_mode->crtc_clock)
6606 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6607 } else {
6608 		/* not scaling and not an amdgpu-inserted mode, no need to patch */
6609 }
6610 }
6611
6612 static struct dc_sink *
6613 create_fake_sink(struct drm_device *dev, struct dc_link *link)
6614 {
6615 struct dc_sink_init_data sink_init_data = { 0 };
6616 struct dc_sink *sink = NULL;
6617
6618 sink_init_data.link = link;
6619 sink_init_data.sink_signal = link->connector_signal;
6620
6621 sink = dc_sink_create(&sink_init_data);
6622 if (!sink) {
6623 drm_err(dev, "Failed to create sink!\n");
6624 return NULL;
6625 }
6626 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6627
6628 return sink;
6629 }
6630
6631 static void set_multisync_trigger_params(
6632 struct dc_stream_state *stream)
6633 {
6634 struct dc_stream_state *master = NULL;
6635
6636 if (stream->triggered_crtc_reset.enabled) {
6637 master = stream->triggered_crtc_reset.event_source;
6638 stream->triggered_crtc_reset.event =
6639 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6640 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6641 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6642 }
6643 }
6644
6645 static void set_master_stream(struct dc_stream_state *stream_set[],
6646 int stream_count)
6647 {
6648 int j, highest_rfr = 0, master_stream = 0;
6649
6650 for (j = 0; j < stream_count; j++) {
6651 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6652 int refresh_rate = 0;
6653
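/*
 * pix_clk_100hz is the pixel clock in units of 100 Hz, so multiplying
 * by 100 yields the clock in Hz; dividing by the total frame size
 * (h_total * v_total) gives the refresh rate of this stream.
 */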
6654 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6655 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6656 if (refresh_rate > highest_rfr) {
6657 highest_rfr = refresh_rate;
6658 master_stream = j;
6659 }
6660 }
6661 }
6662 for (j = 0; j < stream_count; j++) {
6663 if (stream_set[j])
6664 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6665 }
6666 }
6667
6668 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6669 {
6670 int i = 0;
6671 struct dc_stream_state *stream;
6672
6673 if (context->stream_count < 2)
6674 return;
6675 for (i = 0; i < context->stream_count ; i++) {
6676 if (!context->streams[i])
6677 continue;
6678 /*
6679 * TODO: add a function to read AMD VSDB bits and set
6680 * crtc_sync_master.multi_sync_enabled flag
6681 * For now it's set to false
6682 */
6683 }
6684
6685 set_master_stream(context->streams, context->stream_count);
6686
6687 for (i = 0; i < context->stream_count ; i++) {
6688 stream = context->streams[i];
6689
6690 if (!stream)
6691 continue;
6692
6693 set_multisync_trigger_params(stream);
6694 }
6695 }
6696
6697 /**
6698 * DOC: FreeSync Video
6699 *
6700 * When a userspace application wants to play a video, the content follows a
6701 * standard format definition that usually specifies the FPS for that format.
6702 * The list below illustrates some video formats and their expected FPS,
6703 * respectively:
6704 *
6705 * - TV/NTSC (23.976 FPS)
6706 * - Cinema (24 FPS)
6707 * - TV/PAL (25 FPS)
6708 * - TV/NTSC (29.97 FPS)
6709 * - TV/NTSC (30 FPS)
6710 * - Cinema HFR (48 FPS)
6711 * - TV/PAL (50 FPS)
6712 * - Commonly used (60 FPS)
6713 * - Multiples of 24 (48,72,96 FPS)
6714 *
6715 * The list of standard video formats is not huge and can be added to the
6716 * connector mode list beforehand. With that, userspace can leverage
6717 * FreeSync to extend the front porch in order to attain the target refresh
6718 * rate. Such a switch happens seamlessly, without screen blanking or
6719 * reprogramming of the output in any other way. If userspace requests a
6720 * modeset change compatible with FreeSync modes that only differ in the
6721 * refresh rate, DC will skip the full update and avoid any blink during
6722 * the transition. For example, a video player can switch from 60Hz to
6723 * 30Hz for playing TV/NTSC content when it goes full screen, without
6724 * causing any display blink. The same concept applies to any other
6725 * compatible mode setting change.
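 *
 * As a rough, hypothetical illustration (numbers not taken from any
 * specific panel): a 1920x1080 mode with a 148.5 MHz pixel clock,
 * htotal = 2200 and vtotal = 1125 refreshes at
 * 148500000 / (2200 * 1125) = 60 Hz. Keeping the clock and htotal
 * fixed and stretching the vertical front porch until vtotal is about
 * 1406 gives roughly 48 Hz, which is how a FreeSync video mode reaches
 * a lower target rate without a full modeset.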
6726 */
6727 static struct drm_display_mode *
6728 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6729 bool use_probed_modes)
6730 {
6731 struct drm_display_mode *m, *m_pref = NULL;
6732 u16 current_refresh, highest_refresh;
6733 struct list_head *list_head = use_probed_modes ?
6734 &aconnector->base.probed_modes :
6735 &aconnector->base.modes;
6736
6737 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
6738 return NULL;
6739
6740 if (aconnector->freesync_vid_base.clock != 0)
6741 return &aconnector->freesync_vid_base;
6742
6743 /* Find the preferred mode */
6744 list_for_each_entry(m, list_head, head) {
6745 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6746 m_pref = m;
6747 break;
6748 }
6749 }
6750
6751 if (!m_pref) {
6752 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6753 m_pref = list_first_entry_or_null(
6754 &aconnector->base.modes, struct drm_display_mode, head);
6755 if (!m_pref) {
6756 drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
6757 return NULL;
6758 }
6759 }
6760
6761 highest_refresh = drm_mode_vrefresh(m_pref);
6762
6763 /*
6764 * Find the mode with the highest refresh rate at the same resolution.
6765 * For some monitors, the preferred mode is not the mode with the
6766 * highest supported refresh rate.
6767 */
6768 list_for_each_entry(m, list_head, head) {
6769 current_refresh = drm_mode_vrefresh(m);
6770
6771 if (m->hdisplay == m_pref->hdisplay &&
6772 m->vdisplay == m_pref->vdisplay &&
6773 highest_refresh < current_refresh) {
6774 highest_refresh = current_refresh;
6775 m_pref = m;
6776 }
6777 }
6778
6779 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6780 return m_pref;
6781 }
6782
6783 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6784 struct amdgpu_dm_connector *aconnector)
6785 {
6786 struct drm_display_mode *high_mode;
6787 int timing_diff;
6788
6789 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6790 if (!high_mode || !mode)
6791 return false;
6792
6793 timing_diff = high_mode->vtotal - mode->vtotal;
6794
6795 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6796 high_mode->hdisplay != mode->hdisplay ||
6797 high_mode->vdisplay != mode->vdisplay ||
6798 high_mode->hsync_start != mode->hsync_start ||
6799 high_mode->hsync_end != mode->hsync_end ||
6800 high_mode->htotal != mode->htotal ||
6801 high_mode->hskew != mode->hskew ||
6802 high_mode->vscan != mode->vscan ||
6803 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6804 high_mode->vsync_end - mode->vsync_end != timing_diff)
6805 return false;
6806 else
6807 return true;
6808 }
6809
6810 #if defined(CONFIG_DRM_AMD_DC_FP)
6811 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6812 struct dc_sink *sink, struct dc_stream_state *stream,
6813 struct dsc_dec_dpcd_caps *dsc_caps)
6814 {
6815 stream->timing.flags.DSC = 0;
6816 dsc_caps->is_dsc_supported = false;
6817
6818 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6819 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6820 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6821 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6822 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6823 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6824 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6825 dsc_caps);
6826 }
6827 }
6828
6829 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6830 struct dc_sink *sink, struct dc_stream_state *stream,
6831 struct dsc_dec_dpcd_caps *dsc_caps,
6832 uint32_t max_dsc_target_bpp_limit_override)
6833 {
6834 const struct dc_link_settings *verified_link_cap = NULL;
6835 u32 link_bw_in_kbps;
6836 u32 edp_min_bpp_x16, edp_max_bpp_x16;
6837 struct dc *dc = sink->ctx->dc;
6838 struct dc_dsc_bw_range bw_range = {0};
6839 struct dc_dsc_config dsc_cfg = {0};
6840 struct dc_dsc_config_options dsc_options = {0};
6841
6842 dc_dsc_get_default_config_option(dc, &dsc_options);
6843 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
6844
6845 verified_link_cap = dc_link_get_link_cap(stream->link);
6846 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
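/*
 * DSC target bpp values below are in 1/16th-of-a-bit units, so
 * 8 * 16 represents a target of 8 bits per pixel.
 */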
6847 edp_min_bpp_x16 = 8 * 16;
6848 edp_max_bpp_x16 = 8 * 16;
6849
6850 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6851 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6852
6853 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6854 edp_min_bpp_x16 = edp_max_bpp_x16;
6855
6856 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6857 dc->debug.dsc_min_slice_height_override,
6858 edp_min_bpp_x16, edp_max_bpp_x16,
6859 dsc_caps,
6860 &stream->timing,
6861 dc_link_get_highest_encoding_format(aconnector->dc_link),
6862 &bw_range)) {
6863
6864 if (bw_range.max_kbps < link_bw_in_kbps) {
6865 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6866 dsc_caps,
6867 &dsc_options,
6868 0,
6869 &stream->timing,
6870 dc_link_get_highest_encoding_format(aconnector->dc_link),
6871 &dsc_cfg)) {
6872 stream->timing.dsc_cfg = dsc_cfg;
6873 stream->timing.flags.DSC = 1;
6874 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6875 }
6876 return;
6877 }
6878 }
6879
6880 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6881 dsc_caps,
6882 &dsc_options,
6883 link_bw_in_kbps,
6884 &stream->timing,
6885 dc_link_get_highest_encoding_format(aconnector->dc_link),
6886 &dsc_cfg)) {
6887 stream->timing.dsc_cfg = dsc_cfg;
6888 stream->timing.flags.DSC = 1;
6889 }
6890 }
6891
6892 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6893 struct dc_sink *sink, struct dc_stream_state *stream,
6894 struct dsc_dec_dpcd_caps *dsc_caps)
6895 {
6896 struct drm_connector *drm_connector = &aconnector->base;
6897 u32 link_bandwidth_kbps;
6898 struct dc *dc = sink->ctx->dc;
6899 u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
6900 u32 dsc_max_supported_bw_in_kbps;
6901 u32 max_dsc_target_bpp_limit_override =
6902 drm_connector->display_info.max_dsc_bpp;
6903 struct dc_dsc_config_options dsc_options = {0};
6904
6905 dc_dsc_get_default_config_option(dc, &dsc_options);
6906 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
6907
6908 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6909 dc_link_get_link_cap(aconnector->dc_link));
6910
6911 /* Set DSC policy according to dsc_clock_en */
6912 dc_dsc_policy_set_enable_dsc_when_not_needed(
6913 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6914
6915 if (sink->sink_signal == SIGNAL_TYPE_EDP &&
6916 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
6917 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6918
6919 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6920
6921 } else if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6922 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6923 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6924 dsc_caps,
6925 &dsc_options,
6926 link_bandwidth_kbps,
6927 &stream->timing,
6928 dc_link_get_highest_encoding_format(aconnector->dc_link),
6929 &stream->timing.dsc_cfg)) {
6930 stream->timing.flags.DSC = 1;
6931 drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
6932 __func__, drm_connector->name);
6933 }
6934 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6935 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
6936 dc_link_get_highest_encoding_format(aconnector->dc_link));
6937 max_supported_bw_in_kbps = link_bandwidth_kbps;
6938 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6939
6940 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6941 max_supported_bw_in_kbps > 0 &&
6942 dsc_max_supported_bw_in_kbps > 0)
6943 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6944 dsc_caps,
6945 &dsc_options,
6946 dsc_max_supported_bw_in_kbps,
6947 &stream->timing,
6948 dc_link_get_highest_encoding_format(aconnector->dc_link),
6949 &stream->timing.dsc_cfg)) {
6950 stream->timing.flags.DSC = 1;
6951 drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
6952 __func__, drm_connector->name);
6953 }
6954 }
6955 }
6956
6957 /* Overwrite the stream flag if DSC is enabled through debugfs */
6958 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6959 stream->timing.flags.DSC = 1;
6960
6961 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6962 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6963
6964 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6965 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6966
6967 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6968 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6969 }
6970 #endif
6971
6972 static struct dc_stream_state *
6973 create_stream_for_sink(struct drm_connector *connector,
6974 const struct drm_display_mode *drm_mode,
6975 const struct dm_connector_state *dm_state,
6976 const struct dc_stream_state *old_stream,
6977 int requested_bpc)
6978 {
6979 struct drm_device *dev = connector->dev;
6980 struct amdgpu_dm_connector *aconnector = NULL;
6981 struct drm_display_mode *preferred_mode = NULL;
6982 const struct drm_connector_state *con_state = &dm_state->base;
6983 struct dc_stream_state *stream = NULL;
6984 struct drm_display_mode mode;
6985 struct drm_display_mode saved_mode;
6986 struct drm_display_mode *freesync_mode = NULL;
6987 bool native_mode_found = false;
6988 bool recalculate_timing = false;
6989 bool scale = dm_state->scaling != RMX_OFF;
6990 int mode_refresh;
6991 int preferred_refresh = 0;
6992 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
6993 #if defined(CONFIG_DRM_AMD_DC_FP)
6994 struct dsc_dec_dpcd_caps dsc_caps;
6995 #endif
6996 struct dc_link *link = NULL;
6997 struct dc_sink *sink = NULL;
6998
6999 drm_mode_init(&mode, drm_mode);
7000 memset(&saved_mode, 0, sizeof(saved_mode));
7001
7002 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
7003 aconnector = NULL;
7004 aconnector = to_amdgpu_dm_connector(connector);
7005 link = aconnector->dc_link;
7006 } else {
7007 struct drm_writeback_connector *wbcon = NULL;
7008 struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
7009
7010 wbcon = drm_connector_to_writeback(connector);
7011 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
7012 link = dm_wbcon->link;
7013 }
7014
7015 if (!aconnector || !aconnector->dc_sink) {
7016 sink = create_fake_sink(dev, link);
7017 if (!sink)
7018 return stream;
7019
7020 } else {
7021 sink = aconnector->dc_sink;
7022 dc_sink_retain(sink);
7023 }
7024
7025 stream = dc_create_stream_for_sink(sink);
7026
7027 if (stream == NULL) {
7028 drm_err(dev, "Failed to create stream for sink!\n");
7029 goto finish;
7030 }
7031
7032 /* We leave this NULL for writeback connectors */
7033 stream->dm_stream_context = aconnector;
7034
7035 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
7036 connector->display_info.hdmi.scdc.scrambling.low_rates;
7037
7038 list_for_each_entry(preferred_mode, &connector->modes, head) {
7039 /* Search for preferred mode */
7040 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
7041 native_mode_found = true;
7042 break;
7043 }
7044 }
7045 if (!native_mode_found)
7046 preferred_mode = list_first_entry_or_null(
7047 &connector->modes,
7048 struct drm_display_mode,
7049 head);
7050
7051 mode_refresh = drm_mode_vrefresh(&mode);
7052
7053 if (preferred_mode == NULL) {
7054 /*
7055 * This may not be an error: the use case is when there are no
7056 * usermode calls to reset and set the mode upon hotplug. In that
7057 * case, we call set mode ourselves to restore the previous mode,
7058 * and the mode list may not be filled in in time.
7059 */
7060 drm_dbg_driver(dev, "No preferred mode found\n");
7061 } else if (aconnector) {
7062 recalculate_timing = amdgpu_freesync_vid_mode &&
7063 is_freesync_video_mode(&mode, aconnector);
7064 if (recalculate_timing) {
7065 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
7066 drm_mode_copy(&saved_mode, &mode);
7067 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
7068 drm_mode_copy(&mode, freesync_mode);
7069 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
7070 } else {
7071 decide_crtc_timing_for_drm_display_mode(
7072 &mode, preferred_mode, scale);
7073
7074 preferred_refresh = drm_mode_vrefresh(preferred_mode);
7075 }
7076 }
7077
7078 if (recalculate_timing)
7079 drm_mode_set_crtcinfo(&saved_mode, 0);
7080
7081 /*
7082 * If scaling is enabled and the refresh rate didn't change,
7083 * we copy the VIC and polarities from the old timings.
7084 */
7085 if (!scale || mode_refresh != preferred_refresh)
7086 fill_stream_properties_from_drm_display_mode(
7087 stream, &mode, connector, con_state, NULL,
7088 requested_bpc);
7089 else
7090 fill_stream_properties_from_drm_display_mode(
7091 stream, &mode, connector, con_state, old_stream,
7092 requested_bpc);
7093
7094 /* The rest isn't needed for writeback connectors */
7095 if (!aconnector)
7096 goto finish;
7097
7098 if (aconnector->timing_changed) {
7099 drm_dbg(aconnector->base.dev,
7100 "overriding timing for automated test, bpc %d, changing to %d\n",
7101 stream->timing.display_color_depth,
7102 aconnector->timing_requested->display_color_depth);
7103 stream->timing = *aconnector->timing_requested;
7104 }
7105
7106 #if defined(CONFIG_DRM_AMD_DC_FP)
7107 /* SST DSC determination policy */
7108 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
7109 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
7110 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
7111 #endif
7112
7113 update_stream_scaling_settings(&mode, dm_state, stream);
7114
7115 fill_audio_info(
7116 &stream->audio_info,
7117 connector,
7118 sink);
7119
7120 update_stream_signal(stream, sink);
7121
7122 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
7123 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
7124
7125 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
7126 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
7127 stream->signal == SIGNAL_TYPE_EDP) {
7128 const struct dc_edid_caps *edid_caps;
7129 unsigned int disable_colorimetry = 0;
7130
7131 if (aconnector->dc_sink) {
7132 edid_caps = &aconnector->dc_sink->edid_caps;
7133 disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
7134 }
7135
7136 //
7137 // Decide whether the stream supports VSC SDP colorimetry
7138 // before building the VSC info packet.
7139 //
7140 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
7141 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
7142 !disable_colorimetry;
7143
7144 if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
7145 tf = TRANSFER_FUNC_GAMMA_22;
7146 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
7147 aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
7148
7149 }
7150 finish:
7151 dc_sink_release(sink);
7152
7153 return stream;
7154 }
7155
7156 static enum drm_connector_status
7157 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
7158 {
7159 bool connected;
7160 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7161
7162 /*
7163 * Notes:
7164 * 1. This interface is NOT called in context of HPD irq.
7165 * 2. This interface *is called* in context of user-mode ioctl. Which
7166 * makes it a bad place for *any* MST-related activity.
7167 */
7168
7169 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
7170 !aconnector->fake_enable)
7171 connected = (aconnector->dc_sink != NULL);
7172 else
7173 connected = (aconnector->base.force == DRM_FORCE_ON ||
7174 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
7175
7176 update_subconnector_property(aconnector);
7177
7178 return (connected ? connector_status_connected :
7179 connector_status_disconnected);
7180 }
7181
7182 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
7183 struct drm_connector_state *connector_state,
7184 struct drm_property *property,
7185 uint64_t val)
7186 {
7187 struct drm_device *dev = connector->dev;
7188 struct amdgpu_device *adev = drm_to_adev(dev);
7189 struct dm_connector_state *dm_old_state =
7190 to_dm_connector_state(connector->state);
7191 struct dm_connector_state *dm_new_state =
7192 to_dm_connector_state(connector_state);
7193
7194 int ret = -EINVAL;
7195
7196 if (property == dev->mode_config.scaling_mode_property) {
7197 enum amdgpu_rmx_type rmx_type;
7198
7199 switch (val) {
7200 case DRM_MODE_SCALE_CENTER:
7201 rmx_type = RMX_CENTER;
7202 break;
7203 case DRM_MODE_SCALE_ASPECT:
7204 rmx_type = RMX_ASPECT;
7205 break;
7206 case DRM_MODE_SCALE_FULLSCREEN:
7207 rmx_type = RMX_FULL;
7208 break;
7209 case DRM_MODE_SCALE_NONE:
7210 default:
7211 rmx_type = RMX_OFF;
7212 break;
7213 }
7214
7215 if (dm_old_state->scaling == rmx_type)
7216 return 0;
7217
7218 dm_new_state->scaling = rmx_type;
7219 ret = 0;
7220 } else if (property == adev->mode_info.underscan_hborder_property) {
7221 dm_new_state->underscan_hborder = val;
7222 ret = 0;
7223 } else if (property == adev->mode_info.underscan_vborder_property) {
7224 dm_new_state->underscan_vborder = val;
7225 ret = 0;
7226 } else if (property == adev->mode_info.underscan_property) {
7227 dm_new_state->underscan_enable = val;
7228 ret = 0;
7229 }
7230
7231 return ret;
7232 }
7233
7234 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7235 const struct drm_connector_state *state,
7236 struct drm_property *property,
7237 uint64_t *val)
7238 {
7239 struct drm_device *dev = connector->dev;
7240 struct amdgpu_device *adev = drm_to_adev(dev);
7241 struct dm_connector_state *dm_state =
7242 to_dm_connector_state(state);
7243 int ret = -EINVAL;
7244
7245 if (property == dev->mode_config.scaling_mode_property) {
7246 switch (dm_state->scaling) {
7247 case RMX_CENTER:
7248 *val = DRM_MODE_SCALE_CENTER;
7249 break;
7250 case RMX_ASPECT:
7251 *val = DRM_MODE_SCALE_ASPECT;
7252 break;
7253 case RMX_FULL:
7254 *val = DRM_MODE_SCALE_FULLSCREEN;
7255 break;
7256 case RMX_OFF:
7257 default:
7258 *val = DRM_MODE_SCALE_NONE;
7259 break;
7260 }
7261 ret = 0;
7262 } else if (property == adev->mode_info.underscan_hborder_property) {
7263 *val = dm_state->underscan_hborder;
7264 ret = 0;
7265 } else if (property == adev->mode_info.underscan_vborder_property) {
7266 *val = dm_state->underscan_vborder;
7267 ret = 0;
7268 } else if (property == adev->mode_info.underscan_property) {
7269 *val = dm_state->underscan_enable;
7270 ret = 0;
7271 }
7272
7273 return ret;
7274 }
7275
7276 /**
7277 * DOC: panel power savings
7278 *
7279 * The display manager allows you to set your desired **panel power savings**
7280 * level (between 0-4, with 0 representing off), e.g. using the following::
7281 *
7282 * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
7283 *
7284 * Modifying this value can have implications on color accuracy, so tread
7285 * carefully.
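 *
 * The currently applied level can be read back from the same file, e.g.::
 *
 * # cat /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings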
7286 */
7287
7288 static ssize_t panel_power_savings_show(struct device *device,
7289 struct device_attribute *attr,
7290 char *buf)
7291 {
7292 struct drm_connector *connector = dev_get_drvdata(device);
7293 struct drm_device *dev = connector->dev;
7294 u8 val;
7295
7296 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
7297 val = to_dm_connector_state(connector->state)->abm_level ==
7298 ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
7299 to_dm_connector_state(connector->state)->abm_level;
7300 drm_modeset_unlock(&dev->mode_config.connection_mutex);
7301
7302 return sysfs_emit(buf, "%u\n", val);
7303 }
7304
7305 static ssize_t panel_power_savings_store(struct device *device,
7306 struct device_attribute *attr,
7307 const char *buf, size_t count)
7308 {
7309 struct drm_connector *connector = dev_get_drvdata(device);
7310 struct drm_device *dev = connector->dev;
7311 long val;
7312 int ret;
7313
7314 ret = kstrtol(buf, 0, &val);
7315
7316 if (ret)
7317 return ret;
7318
7319 if (val < 0 || val > 4)
7320 return -EINVAL;
7321
7322 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
7323 to_dm_connector_state(connector->state)->abm_level = val ?:
7324 ABM_LEVEL_IMMEDIATE_DISABLE;
7325 drm_modeset_unlock(&dev->mode_config.connection_mutex);
7326
7327 drm_kms_helper_hotplug_event(dev);
7328
7329 return count;
7330 }
7331
7332 static DEVICE_ATTR_RW(panel_power_savings);
7333
7334 static struct attribute *amdgpu_attrs[] = {
7335 &dev_attr_panel_power_savings.attr,
7336 NULL
7337 };
7338
7339 static const struct attribute_group amdgpu_group = {
7340 .name = "amdgpu",
7341 .attrs = amdgpu_attrs
7342 };
7343
7344 static bool
7345 amdgpu_dm_should_create_sysfs(struct amdgpu_dm_connector *amdgpu_dm_connector)
7346 {
7347 if (amdgpu_dm_abm_level >= 0)
7348 return false;
7349
7350 if (amdgpu_dm_connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
7351 return false;
7352
7353 /* check for OLED panels */
7354 if (amdgpu_dm_connector->bl_idx >= 0) {
7355 struct drm_device *drm = amdgpu_dm_connector->base.dev;
7356 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
7357 struct amdgpu_dm_backlight_caps *caps;
7358
7359 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx];
7360 if (caps->aux_support)
7361 return false;
7362 }
7363
7364 return true;
7365 }
7366
7367 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7368 {
7369 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7370
7371 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
7372 sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
7373
7374 cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
7375 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7376 }
7377
7378 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7379 {
7380 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7381 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7382 struct amdgpu_display_manager *dm = &adev->dm;
7383
7384 /*
7385 * Call only if mst_mgr was initialized before since it's not done
7386 * for all connector types.
7387 */
7388 if (aconnector->mst_mgr.dev)
7389 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7390
7391 if (aconnector->bl_idx != -1) {
7392 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
7393 dm->backlight_dev[aconnector->bl_idx] = NULL;
7394 }
7395
7396 if (aconnector->dc_em_sink)
7397 dc_sink_release(aconnector->dc_em_sink);
7398 aconnector->dc_em_sink = NULL;
7399 if (aconnector->dc_sink)
7400 dc_sink_release(aconnector->dc_sink);
7401 aconnector->dc_sink = NULL;
7402
7403 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7404 drm_connector_unregister(connector);
7405 drm_connector_cleanup(connector);
7406 kfree(aconnector->dm_dp_aux.aux.name);
7407
7408 kfree(connector);
7409 }
7410
7411 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7412 {
7413 struct dm_connector_state *state =
7414 to_dm_connector_state(connector->state);
7415
7416 if (connector->state)
7417 __drm_atomic_helper_connector_destroy_state(connector->state);
7418
7419 kfree(state);
7420
7421 state = kzalloc(sizeof(*state), GFP_KERNEL);
7422
7423 if (state) {
7424 state->scaling = RMX_OFF;
7425 state->underscan_enable = false;
7426 state->underscan_hborder = 0;
7427 state->underscan_vborder = 0;
7428 state->base.max_requested_bpc = 8;
7429 state->vcpi_slots = 0;
7430 state->pbn = 0;
7431
7432 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
7433 if (amdgpu_dm_abm_level <= 0)
7434 state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
7435 else
7436 state->abm_level = amdgpu_dm_abm_level;
7437 }
7438
7439 __drm_atomic_helper_connector_reset(connector, &state->base);
7440 }
7441 }
7442
7443 struct drm_connector_state *
7444 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7445 {
7446 struct dm_connector_state *state =
7447 to_dm_connector_state(connector->state);
7448
7449 struct dm_connector_state *new_state =
7450 kmemdup(state, sizeof(*state), GFP_KERNEL);
7451
7452 if (!new_state)
7453 return NULL;
7454
7455 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7456
7457 new_state->freesync_capable = state->freesync_capable;
7458 new_state->abm_level = state->abm_level;
7459 new_state->scaling = state->scaling;
7460 new_state->underscan_enable = state->underscan_enable;
7461 new_state->underscan_hborder = state->underscan_hborder;
7462 new_state->underscan_vborder = state->underscan_vborder;
7463 new_state->vcpi_slots = state->vcpi_slots;
7464 new_state->pbn = state->pbn;
7465 return &new_state->base;
7466 }
7467
7468 static int
7469 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7470 {
7471 struct amdgpu_dm_connector *amdgpu_dm_connector =
7472 to_amdgpu_dm_connector(connector);
7473 int r;
7474
7475 if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector)) {
7476 r = sysfs_create_group(&connector->kdev->kobj,
7477 &amdgpu_group);
7478 if (r)
7479 return r;
7480 }
7481
7482 amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
7483
7484 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7485 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7486 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7487 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7488 if (r)
7489 return r;
7490 }
7491
7492 #if defined(CONFIG_DEBUG_FS)
7493 connector_debugfs_init(amdgpu_dm_connector);
7494 #endif
7495
7496 return 0;
7497 }
7498
7499 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
7500 {
7501 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7502 struct dc_link *dc_link = aconnector->dc_link;
7503 struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
7504 const struct drm_edid *drm_edid;
7505 struct i2c_adapter *ddc;
7506 struct drm_device *dev = connector->dev;
7507
7508 if (dc_link && dc_link->aux_mode)
7509 ddc = &aconnector->dm_dp_aux.aux.ddc;
7510 else
7511 ddc = &aconnector->i2c->base;
7512
7513 drm_edid = drm_edid_read_ddc(connector, ddc);
7514 drm_edid_connector_update(connector, drm_edid);
7515 if (!drm_edid) {
7516 drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
7517 return;
7518 }
7519
7520 aconnector->drm_edid = drm_edid;
7521 /* Update emulated (virtual) sink's EDID */
7522 if (dc_em_sink && dc_link) {
7523 // FIXME: Get rid of drm_edid_raw()
7524 const struct edid *edid = drm_edid_raw(drm_edid);
7525
7526 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
7527 memmove(dc_em_sink->dc_edid.raw_edid, edid,
7528 (edid->extensions + 1) * EDID_LENGTH);
7529 dm_helpers_parse_edid_caps(
7530 dc_link,
7531 &dc_em_sink->dc_edid,
7532 &dc_em_sink->edid_caps);
7533 }
7534 }
7535
7536 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7537 .reset = amdgpu_dm_connector_funcs_reset,
7538 .detect = amdgpu_dm_connector_detect,
7539 .fill_modes = drm_helper_probe_single_connector_modes,
7540 .destroy = amdgpu_dm_connector_destroy,
7541 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7542 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7543 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7544 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7545 .late_register = amdgpu_dm_connector_late_register,
7546 .early_unregister = amdgpu_dm_connector_unregister,
7547 .force = amdgpu_dm_connector_funcs_force
7548 };
7549
7550 static int get_modes(struct drm_connector *connector)
7551 {
7552 return amdgpu_dm_connector_get_modes(connector);
7553 }
7554
7555 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7556 {
7557 struct drm_connector *connector = &aconnector->base;
7558 struct dc_link *dc_link = aconnector->dc_link;
7559 struct dc_sink_init_data init_params = {
7560 .link = aconnector->dc_link,
7561 .sink_signal = SIGNAL_TYPE_VIRTUAL
7562 };
7563 const struct drm_edid *drm_edid;
7564 const struct edid *edid;
7565 struct i2c_adapter *ddc;
7566
7567 if (dc_link && dc_link->aux_mode)
7568 ddc = &aconnector->dm_dp_aux.aux.ddc;
7569 else
7570 ddc = &aconnector->i2c->base;
7571
7572 drm_edid = drm_edid_read_ddc(connector, ddc);
7573 drm_edid_connector_update(connector, drm_edid);
7574 if (!drm_edid) {
7575 drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
7576 return;
7577 }
7578
7579 if (connector->display_info.is_hdmi)
7580 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
7581
7582 aconnector->drm_edid = drm_edid;
7583
7584 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
7585 aconnector->dc_em_sink = dc_link_add_remote_sink(
7586 aconnector->dc_link,
7587 (uint8_t *)edid,
7588 (edid->extensions + 1) * EDID_LENGTH,
7589 &init_params);
7590
7591 if (aconnector->base.force == DRM_FORCE_ON) {
7592 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7593 aconnector->dc_link->local_sink :
7594 aconnector->dc_em_sink;
7595 if (aconnector->dc_sink)
7596 dc_sink_retain(aconnector->dc_sink);
7597 }
7598 }
7599
7600 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7601 {
7602 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7603
7604 /*
7605 * In case of a headless boot with force-on for a DP-managed connector,
7606 * these settings have to be != 0 to get an initial modeset.
7607 */
7608 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7609 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7610 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7611 }
7612
7613 create_eml_sink(aconnector);
7614 }
7615
7616 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
7617 struct dc_stream_state *stream)
7618 {
7619 enum dc_status dc_result = DC_ERROR_UNEXPECTED;
7620 struct dc_plane_state *dc_plane_state = NULL;
7621 struct dc_state *dc_state = NULL;
7622
7623 if (!stream)
7624 goto cleanup;
7625
7626 dc_plane_state = dc_create_plane_state(dc);
7627 if (!dc_plane_state)
7628 goto cleanup;
7629
7630 dc_state = dc_state_create(dc, NULL);
7631 if (!dc_state)
7632 goto cleanup;
7633
7634 /* populate stream to plane */
7635 dc_plane_state->src_rect.height = stream->src.height;
7636 dc_plane_state->src_rect.width = stream->src.width;
7637 dc_plane_state->dst_rect.height = stream->src.height;
7638 dc_plane_state->dst_rect.width = stream->src.width;
7639 dc_plane_state->clip_rect.height = stream->src.height;
7640 dc_plane_state->clip_rect.width = stream->src.width;
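/* Align the validation plane's pitch up to the next multiple of 256 pixels. */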
7641 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
7642 dc_plane_state->plane_size.surface_size.height = stream->src.height;
7643 dc_plane_state->plane_size.surface_size.width = stream->src.width;
7644 dc_plane_state->plane_size.chroma_size.height = stream->src.height;
7645 dc_plane_state->plane_size.chroma_size.width = stream->src.width;
7646 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
7647 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
7648 dc_plane_state->rotation = ROTATION_ANGLE_0;
7649 dc_plane_state->is_tiling_rotated = false;
7650 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
7651
7652 dc_result = dc_validate_stream(dc, stream);
7653 if (dc_result == DC_OK)
7654 dc_result = dc_validate_plane(dc, dc_plane_state);
7655
7656 if (dc_result == DC_OK)
7657 dc_result = dc_state_add_stream(dc, dc_state, stream);
7658
7659 if (dc_result == DC_OK && !dc_state_add_plane(
7660 dc,
7661 stream,
7662 dc_plane_state,
7663 dc_state))
7664 dc_result = DC_FAIL_ATTACH_SURFACES;
7665
7666 if (dc_result == DC_OK)
7667 dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
7668
7669 cleanup:
7670 if (dc_state)
7671 dc_state_release(dc_state);
7672
7673 if (dc_plane_state)
7674 dc_plane_state_release(dc_plane_state);
7675
7676 return dc_result;
7677 }
7678
7679 struct dc_stream_state *
7680 create_validate_stream_for_sink(struct drm_connector *connector,
7681 const struct drm_display_mode *drm_mode,
7682 const struct dm_connector_state *dm_state,
7683 const struct dc_stream_state *old_stream)
7684 {
7685 struct amdgpu_dm_connector *aconnector = NULL;
7686 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7687 struct dc_stream_state *stream;
7688 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7689 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7690 enum dc_status dc_result = DC_OK;
7691 uint8_t bpc_limit = 6;
7692
7693 if (!dm_state)
7694 return NULL;
7695
7696 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
7697 aconnector = to_amdgpu_dm_connector(connector);
7698
7699 if (aconnector &&
7700 (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
7701 aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
7702 bpc_limit = 8;
7703
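/*
 * Retry stream creation at progressively lower color depths. Native DP
 * may fall back as far as 6 bpc, while HDMI sinks and DP-to-HDMI
 * dongles stop at 8 bpc (see bpc_limit above).
 */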
7704 do {
7705 drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
7706 stream = create_stream_for_sink(connector, drm_mode,
7707 dm_state, old_stream,
7708 requested_bpc);
7709 if (stream == NULL) {
7710 drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
7711 break;
7712 }
7713
7714 dc_result = dc_validate_stream(adev->dm.dc, stream);
7715
7716 if (!aconnector) /* writeback connector */
7717 return stream;
7718
7719 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7720 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7721
7722 if (dc_result == DC_OK)
7723 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
7724
7725 if (dc_result != DC_OK) {
7726 DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
7727 drm_mode->hdisplay,
7728 drm_mode->vdisplay,
7729 drm_mode->clock,
7730 dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
7731 dc_color_depth_to_str(stream->timing.display_color_depth),
7732 dc_status_to_str(dc_result));
7733
7734 dc_stream_release(stream);
7735 stream = NULL;
7736 requested_bpc -= 2; /* lower bpc to retry validation */
7737 }
7738
7739 } while (stream == NULL && requested_bpc >= bpc_limit);
7740
7741 switch (dc_result) {
7742 /*
7743 * If we failed to validate the stream's DP bandwidth with the requested RGB color depth,
7744 * we try to fall back and configure, in order:
7745 * YUV422 (8bpc, 6bpc)
7746 * YUV420 (8bpc, 6bpc)
7747 */
7748 case DC_FAIL_ENC_VALIDATE:
7749 case DC_EXCEED_DONGLE_CAP:
7750 case DC_NO_DP_LINK_BANDWIDTH:
7751 /* recursively entered twice and already tried both YUV422 and YUV420 */
7752 if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
7753 break;
7754 /* first failure; try YUV422 */
7755 if (!aconnector->force_yuv422_output) {
7756 drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
7757 __func__, __LINE__, dc_result);
7758 aconnector->force_yuv422_output = true;
7759 /* recursively entered and YUV422 failed, try YUV420 */
7760 } else if (!aconnector->force_yuv420_output) {
7761 drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
7762 __func__, __LINE__, dc_result);
7763 aconnector->force_yuv420_output = true;
7764 }
7765 stream = create_validate_stream_for_sink(connector, drm_mode,
7766 dm_state, old_stream);
7767 aconnector->force_yuv422_output = false;
7768 aconnector->force_yuv420_output = false;
7769 break;
7770 case DC_OK:
7771 break;
7772 default:
7773 drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
7774 __func__, __LINE__, dc_result);
7775 break;
7776 }
7777
7778 return stream;
7779 }
7780
7781 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7782 const struct drm_display_mode *mode)
7783 {
7784 int result = MODE_ERROR;
7785 struct dc_sink *dc_sink;
7786 struct drm_display_mode *test_mode;
7787 /* TODO: Unhardcode stream count */
7788 struct dc_stream_state *stream;
7789 /* we always have an amdgpu_dm_connector here since we got
7790 * here via the amdgpu_dm_connector_helper_funcs
7791 */
7792 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7793
7794 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7795 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7796 return result;
7797
7798 /*
7799 * Only run this the first time mode_valid is called, to initialize
7800 * EDID mgmt
7801 */
7802 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7803 !aconnector->dc_em_sink)
7804 handle_edid_mgmt(aconnector);
7805
7806 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7807
7808 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7809 aconnector->base.force != DRM_FORCE_ON) {
7810 drm_err(connector->dev, "dc_sink is NULL!\n");
7811 goto fail;
7812 }
7813
7814 test_mode = drm_mode_duplicate(connector->dev, mode);
7815 if (!test_mode)
7816 goto fail;
7817
7818 drm_mode_set_crtcinfo(test_mode, 0);
7819
7820 stream = create_validate_stream_for_sink(connector, test_mode,
7821 to_dm_connector_state(connector->state),
7822 NULL);
7823 drm_mode_destroy(connector->dev, test_mode);
7824 if (stream) {
7825 dc_stream_release(stream);
7826 result = MODE_OK;
7827 }
7828
7829 fail:
7830 /* TODO: error handling */
7831 return result;
7832 }
7833
7834 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7835 struct dc_info_packet *out)
7836 {
7837 struct hdmi_drm_infoframe frame;
7838 unsigned char buf[30]; /* 26 + 4 */
7839 ssize_t len;
7840 int ret, i;
7841
7842 memset(out, 0, sizeof(*out));
7843
7844 if (!state->hdr_output_metadata)
7845 return 0;
7846
7847 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7848 if (ret)
7849 return ret;
7850
7851 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7852 if (len < 0)
7853 return (int)len;
7854
7855 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7856 if (len != 30)
7857 return -EINVAL;
7858
7859 /* Prepare the infopacket for DC. */
7860 switch (state->connector->connector_type) {
7861 case DRM_MODE_CONNECTOR_HDMIA:
7862 out->hb0 = 0x87; /* type */
7863 out->hb1 = 0x01; /* version */
7864 out->hb2 = 0x1A; /* length */
7865 out->sb[0] = buf[3]; /* checksum */
7866 i = 1;
7867 break;
7868
7869 case DRM_MODE_CONNECTOR_DisplayPort:
7870 case DRM_MODE_CONNECTOR_eDP:
7871 out->hb0 = 0x00; /* sdp id, zero */
7872 out->hb1 = 0x87; /* type */
7873 out->hb2 = 0x1D; /* payload len - 1 */
7874 out->hb3 = (0x13 << 2); /* sdp version */
7875 out->sb[0] = 0x01; /* version */
7876 out->sb[1] = 0x1A; /* length */
7877 i = 2;
7878 break;
7879
7880 default:
7881 return -EINVAL;
7882 }
7883
7884 memcpy(&out->sb[i], &buf[4], 26);
7885 out->valid = true;
7886
7887 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7888 sizeof(out->sb), false);
7889
7890 return 0;
7891 }
7892
7893 static int
7894 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7895 struct drm_atomic_state *state)
7896 {
7897 struct drm_connector_state *new_con_state =
7898 drm_atomic_get_new_connector_state(state, conn);
7899 struct drm_connector_state *old_con_state =
7900 drm_atomic_get_old_connector_state(state, conn);
7901 struct drm_crtc *crtc = new_con_state->crtc;
7902 struct drm_crtc_state *new_crtc_state;
7903 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
7904 int ret;
7905
7906 if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
7907 return -EINVAL;
7908
7909 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7910
7911 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
7912 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
7913 if (ret < 0)
7914 return ret;
7915 }
7916
7917 if (!crtc)
7918 return 0;
7919
7920 if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
7921 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7922 if (IS_ERR(new_crtc_state))
7923 return PTR_ERR(new_crtc_state);
7924
7925 new_crtc_state->mode_changed = true;
7926 }
7927
7928 if (new_con_state->colorspace != old_con_state->colorspace) {
7929 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7930 if (IS_ERR(new_crtc_state))
7931 return PTR_ERR(new_crtc_state);
7932
7933 new_crtc_state->mode_changed = true;
7934 }
7935
7936 if (new_con_state->content_type != old_con_state->content_type) {
7937 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7938 if (IS_ERR(new_crtc_state))
7939 return PTR_ERR(new_crtc_state);
7940
7941 new_crtc_state->mode_changed = true;
7942 }
7943
7944 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7945 struct dc_info_packet hdr_infopacket;
7946
7947 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7948 if (ret)
7949 return ret;
7950
7951 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7952 if (IS_ERR(new_crtc_state))
7953 return PTR_ERR(new_crtc_state);
7954
7955 /*
7956 * DC considers the stream backends changed if the
7957 * static metadata changes. Forcing the modeset also
7958 * gives a simple way for userspace to switch from
7959 * 8bpc to 10bpc when setting the metadata to enter
7960 * or exit HDR.
7961 *
7962 * Changing the static metadata after it's been
7963 * set is permissible, however. So only force a
7964 * modeset if we're entering or exiting HDR.
7965 */
7966 new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
7967 !old_con_state->hdr_output_metadata ||
7968 !new_con_state->hdr_output_metadata;
7969 }
7970
7971 return 0;
7972 }
7973
7974 static const struct drm_connector_helper_funcs
7975 amdgpu_dm_connector_helper_funcs = {
7976 /*
7977 * If a second, bigger display is hotplugged in fbcon mode, its higher
7978 * resolution modes are filtered out by drm_mode_validate_size(), and those
7979 * modes are missing after the user starts lightdm. So we need to renew the
7980 * mode list in the get_modes callback, not just return the mode count.
7981 */
7982 .get_modes = get_modes,
7983 .mode_valid = amdgpu_dm_connector_mode_valid,
7984 .atomic_check = amdgpu_dm_connector_atomic_check,
7985 };
7986
7987 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7988 {
7989
7990 }
7991
7992 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7993 {
7994 switch (display_color_depth) {
7995 case COLOR_DEPTH_666:
7996 return 6;
7997 case COLOR_DEPTH_888:
7998 return 8;
7999 case COLOR_DEPTH_101010:
8000 return 10;
8001 case COLOR_DEPTH_121212:
8002 return 12;
8003 case COLOR_DEPTH_141414:
8004 return 14;
8005 case COLOR_DEPTH_161616:
8006 return 16;
8007 default:
8008 break;
8009 }
8010 return 0;
8011 }
8012
8013 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
8014 struct drm_crtc_state *crtc_state,
8015 struct drm_connector_state *conn_state)
8016 {
8017 struct drm_atomic_state *state = crtc_state->state;
8018 struct drm_connector *connector = conn_state->connector;
8019 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8020 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
8021 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
8022 struct drm_dp_mst_topology_mgr *mst_mgr;
8023 struct drm_dp_mst_port *mst_port;
8024 struct drm_dp_mst_topology_state *mst_state;
8025 enum dc_color_depth color_depth;
8026 int clock, bpp = 0;
8027 bool is_y420 = false;
8028
8029 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
8030 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
8031 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8032 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8033 enum drm_mode_status result;
8034
8035 result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
8036 if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
8037 drm_dbg_driver(encoder->dev,
8038 "mode %dx%d@%dHz is not native, enabling scaling\n",
8039 adjusted_mode->hdisplay, adjusted_mode->vdisplay,
8040 drm_mode_vrefresh(adjusted_mode));
8041 dm_new_connector_state->scaling = RMX_ASPECT;
8042 }
8043 return 0;
8044 }
8045
8046 if (!aconnector->mst_output_port)
8047 return 0;
8048
8049 mst_port = aconnector->mst_output_port;
8050 mst_mgr = &aconnector->mst_root->mst_mgr;
8051
8052 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
8053 return 0;
8054
8055 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
8056 if (IS_ERR(mst_state))
8057 return PTR_ERR(mst_state);
8058
8059 mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
8060
8061 if (!state->duplicated) {
8062 int max_bpc = conn_state->max_requested_bpc;
8063
8064 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
8065 aconnector->force_yuv420_output;
8066 color_depth = convert_color_depth_from_display_info(connector,
8067 is_y420,
8068 max_bpc);
8069 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
8070 clock = adjusted_mode->clock;
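/*
 * drm_dp_calc_pbn_mode() takes bpp in 1/16th-of-a-bit units,
 * hence the << 4 on the 3-component bpp value.
 */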
8071 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
8072 }
8073
8074 dm_new_connector_state->vcpi_slots =
8075 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
8076 dm_new_connector_state->pbn);
8077 if (dm_new_connector_state->vcpi_slots < 0) {
8078 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
8079 return dm_new_connector_state->vcpi_slots;
8080 }
8081 return 0;
8082 }
8083
8084 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
8085 .disable = dm_encoder_helper_disable,
8086 .atomic_check = dm_encoder_helper_atomic_check
8087 };
8088
8089 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
8090 struct dc_state *dc_state,
8091 struct dsc_mst_fairness_vars *vars)
8092 {
8093 struct dc_stream_state *stream = NULL;
8094 struct drm_connector *connector;
8095 struct drm_connector_state *new_con_state;
8096 struct amdgpu_dm_connector *aconnector;
8097 struct dm_connector_state *dm_conn_state;
8098 int i, j, ret;
8099 int vcpi, pbn_div, pbn = 0, slot_num = 0;
8100
8101 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8102
8103 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8104 continue;
8105
8106 aconnector = to_amdgpu_dm_connector(connector);
8107
8108 if (!aconnector->mst_output_port)
8109 continue;
8110
8111 if (!new_con_state || !new_con_state->crtc)
8112 continue;
8113
8114 dm_conn_state = to_dm_connector_state(new_con_state);
8115
8116 for (j = 0; j < dc_state->stream_count; j++) {
8117 stream = dc_state->streams[j];
8118 if (!stream)
8119 continue;
8120
8121 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
8122 break;
8123
8124 stream = NULL;
8125 }
8126
8127 if (!stream)
8128 continue;
8129
8130 pbn_div = dm_mst_get_pbn_divider(stream->link);
8131 /* pbn is calculated by compute_mst_dsc_configs_for_state */
8132 for (j = 0; j < dc_state->stream_count; j++) {
8133 if (vars[j].aconnector == aconnector) {
8134 pbn = vars[j].pbn;
8135 break;
8136 }
8137 }
8138
8139 if (j == dc_state->stream_count || pbn_div == 0)
8140 continue;
8141
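/*
 * pbn_div is the PBN capacity of one MST time slot, so rounding up
 * pbn / pbn_div gives the number of time slots this stream needs.
 */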
8142 slot_num = DIV_ROUND_UP(pbn, pbn_div);
8143
8144 if (stream->timing.flags.DSC != 1) {
8145 dm_conn_state->pbn = pbn;
8146 dm_conn_state->vcpi_slots = slot_num;
8147
8148 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
8149 dm_conn_state->pbn, false);
8150 if (ret < 0)
8151 return ret;
8152
8153 continue;
8154 }
8155
8156 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
8157 if (vcpi < 0)
8158 return vcpi;
8159
8160 dm_conn_state->pbn = pbn;
8161 dm_conn_state->vcpi_slots = vcpi;
8162 }
8163 return 0;
8164 }
8165
8166 static int to_drm_connector_type(enum signal_type st)
8167 {
8168 switch (st) {
8169 case SIGNAL_TYPE_HDMI_TYPE_A:
8170 return DRM_MODE_CONNECTOR_HDMIA;
8171 case SIGNAL_TYPE_EDP:
8172 return DRM_MODE_CONNECTOR_eDP;
8173 case SIGNAL_TYPE_LVDS:
8174 return DRM_MODE_CONNECTOR_LVDS;
8175 case SIGNAL_TYPE_RGB:
8176 return DRM_MODE_CONNECTOR_VGA;
8177 case SIGNAL_TYPE_DISPLAY_PORT:
8178 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8179 return DRM_MODE_CONNECTOR_DisplayPort;
8180 case SIGNAL_TYPE_DVI_DUAL_LINK:
8181 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8182 return DRM_MODE_CONNECTOR_DVID;
8183 case SIGNAL_TYPE_VIRTUAL:
8184 return DRM_MODE_CONNECTOR_VIRTUAL;
8185
8186 default:
8187 return DRM_MODE_CONNECTOR_Unknown;
8188 }
8189 }
8190
8191 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8192 {
8193 struct drm_encoder *encoder;
8194
8195 /* There is only one encoder per connector */
8196 drm_connector_for_each_possible_encoder(connector, encoder)
8197 return encoder;
8198
8199 return NULL;
8200 }
8201
8202 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8203 {
8204 struct drm_encoder *encoder;
8205 struct amdgpu_encoder *amdgpu_encoder;
8206
8207 encoder = amdgpu_dm_connector_to_encoder(connector);
8208
8209 if (encoder == NULL)
8210 return;
8211
8212 amdgpu_encoder = to_amdgpu_encoder(encoder);
8213
8214 amdgpu_encoder->native_mode.clock = 0;
8215
8216 if (!list_empty(&connector->probed_modes)) {
8217 struct drm_display_mode *preferred_mode = NULL;
8218
8219 list_for_each_entry(preferred_mode,
8220 &connector->probed_modes,
8221 head) {
8222 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8223 amdgpu_encoder->native_mode = *preferred_mode;
8224
8225 break;
8226 }
8227
8228 }
8229 }
8230
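/*
 * Duplicate the encoder's native mode and override its active size,
 * producing a named "common" mode that the panel can scale to.
 */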
8231 static struct drm_display_mode *
8232 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8233 char *name,
8234 int hdisplay, int vdisplay)
8235 {
8236 struct drm_device *dev = encoder->dev;
8237 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8238 struct drm_display_mode *mode = NULL;
8239 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8240
8241 mode = drm_mode_duplicate(dev, native_mode);
8242
8243 if (mode == NULL)
8244 return NULL;
8245
8246 mode->hdisplay = hdisplay;
8247 mode->vdisplay = vdisplay;
8248 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8249 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8250
8251 return mode;
8252
8253 }
8254
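/*
 * For eDP/LVDS panels, add a set of common resolutions smaller than the
 * native mode so user space has scaled fallback modes to choose from.
 */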
8255 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8256 struct drm_connector *connector)
8257 {
8258 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8259 struct drm_display_mode *mode = NULL;
8260 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8261 struct amdgpu_dm_connector *amdgpu_dm_connector =
8262 to_amdgpu_dm_connector(connector);
8263 int i;
8264 int n;
8265 struct mode_size {
8266 char name[DRM_DISPLAY_MODE_LEN];
8267 int w;
8268 int h;
8269 } common_modes[] = {
8270 { "640x480", 640, 480},
8271 { "800x600", 800, 600},
8272 { "1024x768", 1024, 768},
8273 { "1280x720", 1280, 720},
8274 { "1280x800", 1280, 800},
8275 {"1280x1024", 1280, 1024},
8276 { "1440x900", 1440, 900},
8277 {"1680x1050", 1680, 1050},
8278 {"1600x1200", 1600, 1200},
8279 {"1920x1080", 1920, 1080},
8280 {"1920x1200", 1920, 1200}
8281 };
8282
8283 if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
8284 (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
8285 return;
8286
8287 n = ARRAY_SIZE(common_modes);
8288
8289 for (i = 0; i < n; i++) {
8290 struct drm_display_mode *curmode = NULL;
8291 bool mode_existed = false;
8292
8293 if (common_modes[i].w > native_mode->hdisplay ||
8294 common_modes[i].h > native_mode->vdisplay ||
8295 (common_modes[i].w == native_mode->hdisplay &&
8296 common_modes[i].h == native_mode->vdisplay))
8297 continue;
8298
8299 list_for_each_entry(curmode, &connector->probed_modes, head) {
8300 if (common_modes[i].w == curmode->hdisplay &&
8301 common_modes[i].h == curmode->vdisplay) {
8302 mode_existed = true;
8303 break;
8304 }
8305 }
8306
8307 if (mode_existed)
8308 continue;
8309
8310 mode = amdgpu_dm_create_common_mode(encoder,
8311 common_modes[i].name, common_modes[i].w,
8312 common_modes[i].h);
8313 if (!mode)
8314 continue;
8315
8316 drm_mode_probed_add(connector, mode);
8317 amdgpu_dm_connector->num_modes++;
8318 }
8319 }
8320
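/*
 * Probe the panel's modes to learn its native resolution, then apply any
 * panel-orientation quirk that matches it.
 */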
8321 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8322 {
8323 struct drm_encoder *encoder;
8324 struct amdgpu_encoder *amdgpu_encoder;
8325 const struct drm_display_mode *native_mode;
8326
8327 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8328 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8329 return;
8330
8331 mutex_lock(&connector->dev->mode_config.mutex);
8332 amdgpu_dm_connector_get_modes(connector);
8333 mutex_unlock(&connector->dev->mode_config.mutex);
8334
8335 encoder = amdgpu_dm_connector_to_encoder(connector);
8336 if (!encoder)
8337 return;
8338
8339 amdgpu_encoder = to_amdgpu_encoder(encoder);
8340
8341 native_mode = &amdgpu_encoder->native_mode;
8342 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8343 return;
8344
8345 drm_connector_set_panel_orientation_with_quirk(connector,
8346 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8347 native_mode->hdisplay,
8348 native_mode->vdisplay);
8349 }
8350
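/*
 * Rebuild the probed mode list from the parsed EDID and refresh the
 * native mode and FreeSync capabilities derived from it.
 */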
8351 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8352 const struct drm_edid *drm_edid)
8353 {
8354 struct amdgpu_dm_connector *amdgpu_dm_connector =
8355 to_amdgpu_dm_connector(connector);
8356
8357 if (drm_edid) {
8358 /* empty probed_modes */
8359 INIT_LIST_HEAD(&connector->probed_modes);
8360 amdgpu_dm_connector->num_modes =
8361 drm_edid_connector_add_modes(connector);
8362
8363 /* Sort the probed modes before calling
8364 * amdgpu_dm_get_native_mode(), since an EDID can carry
8365 * more than one preferred mode. Modes later in the
8366 * probed list may have a higher, and preferred,
8367 * resolution. For example, 3840x2160 in the base EDID
8368 * preferred timing and 4096x2160 as the preferred
8369 * resolution in a later DID extension block.
8370 */
8371 drm_mode_sort(&connector->probed_modes);
8372 amdgpu_dm_get_native_mode(connector);
8373
8374 /* Freesync capabilities are reset by calling
8375 * drm_edid_connector_add_modes() and need to be
8376 * restored here.
8377 */
8378 amdgpu_dm_update_freesync_caps(connector, drm_edid);
8379 } else {
8380 amdgpu_dm_connector->num_modes = 0;
8381 }
8382 }
8383
8384 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8385 struct drm_display_mode *mode)
8386 {
8387 struct drm_display_mode *m;
8388
8389 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8390 if (drm_mode_equal(m, mode))
8391 return true;
8392 }
8393
8394 return false;
8395 }
8396
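/*
 * Synthesize extra fixed-refresh modes inside the panel's FreeSync range
 * by stretching the vertical blanking of the highest-refresh base mode.
 */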
8397 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8398 {
8399 const struct drm_display_mode *m;
8400 struct drm_display_mode *new_mode;
8401 uint i;
8402 u32 new_modes_count = 0;
8403
8404 /* Standard FPS values
8405 *
8406 * 23.976 - TV/NTSC
8407 * 24 - Cinema
8408 * 25 - TV/PAL
8409 * 29.97 - TV/NTSC
8410 * 30 - TV/NTSC
8411 * 48 - Cinema HFR
8412 * 50 - TV/PAL
8413 * 60 - Commonly used
8414 * 48,72,96,120 - Multiples of 24
8415 */
8416 static const u32 common_rates[] = {
8417 23976, 24000, 25000, 29970, 30000,
8418 48000, 50000, 60000, 72000, 96000, 120000
8419 };
8420
8421 /*
8422 * Find mode with highest refresh rate with the same resolution
8423 * as the preferred mode. Some monitors report a preferred mode
8424 * with lower resolution than the highest refresh rate supported.
8425 */
8426
8427 m = get_highest_refresh_rate_mode(aconnector, true);
8428 if (!m)
8429 return 0;
8430
8431 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8432 u64 target_vtotal, target_vtotal_diff;
8433 u64 num, den;
8434
8435 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8436 continue;
8437
8438 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8439 common_rates[i] > aconnector->max_vfreq * 1000)
8440 continue;
8441
8442 num = (unsigned long long)m->clock * 1000 * 1000;
8443 den = common_rates[i] * (unsigned long long)m->htotal;
8444 target_vtotal = div_u64(num, den);
8445 target_vtotal_diff = target_vtotal - m->vtotal;
8446
8447 /* Check for illegal modes */
8448 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8449 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8450 m->vtotal + target_vtotal_diff < m->vsync_end)
8451 continue;
8452
8453 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8454 if (!new_mode)
8455 goto out;
8456
8457 new_mode->vtotal += (u16)target_vtotal_diff;
8458 new_mode->vsync_start += (u16)target_vtotal_diff;
8459 new_mode->vsync_end += (u16)target_vtotal_diff;
8460 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8461 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8462
8463 if (!is_duplicate_mode(aconnector, new_mode)) {
8464 drm_mode_probed_add(&aconnector->base, new_mode);
8465 new_modes_count += 1;
8466 } else
8467 drm_mode_destroy(aconnector->base.dev, new_mode);
8468 }
8469 out:
8470 return new_modes_count;
8471 }
8472
8473 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8474 const struct drm_edid *drm_edid)
8475 {
8476 struct amdgpu_dm_connector *amdgpu_dm_connector =
8477 to_amdgpu_dm_connector(connector);
8478
8479 if (!(amdgpu_freesync_vid_mode && drm_edid))
8480 return;
8481
8482 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8483 amdgpu_dm_connector->num_modes +=
8484 add_fs_modes(amdgpu_dm_connector);
8485 }
8486
8487 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8488 {
8489 struct amdgpu_dm_connector *amdgpu_dm_connector =
8490 to_amdgpu_dm_connector(connector);
8491 struct drm_encoder *encoder;
8492 const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
8493 struct dc_link_settings *verified_link_cap =
8494 &amdgpu_dm_connector->dc_link->verified_link_cap;
8495 const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
8496
8497 encoder = amdgpu_dm_connector_to_encoder(connector);
8498
8499 if (!drm_edid) {
8500 amdgpu_dm_connector->num_modes =
8501 drm_add_modes_noedid(connector, 640, 480);
8502 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
8503 amdgpu_dm_connector->num_modes +=
8504 drm_add_modes_noedid(connector, 1920, 1080);
8505 } else {
8506 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
8507 if (encoder)
8508 amdgpu_dm_connector_add_common_modes(encoder, connector);
8509 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
8510 }
8511 amdgpu_dm_fbc_init(connector);
8512
8513 return amdgpu_dm_connector->num_modes;
8514 }
8515
8516 static const u32 supported_colorspaces =
8517 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
8518 BIT(DRM_MODE_COLORIMETRY_OPRGB) |
8519 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
8520 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
8521
8522 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8523 struct amdgpu_dm_connector *aconnector,
8524 int connector_type,
8525 struct dc_link *link,
8526 int link_index)
8527 {
8528 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8529
8530 /*
8531 * Some of the properties below require access to state, like bpc.
8532 * Allocate some default initial connector state with our reset helper.
8533 */
8534 if (aconnector->base.funcs->reset)
8535 aconnector->base.funcs->reset(&aconnector->base);
8536
8537 aconnector->connector_id = link_index;
8538 aconnector->bl_idx = -1;
8539 aconnector->dc_link = link;
8540 aconnector->base.interlace_allowed = false;
8541 aconnector->base.doublescan_allowed = false;
8542 aconnector->base.stereo_allowed = false;
8543 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8544 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8545 aconnector->audio_inst = -1;
8546 aconnector->pack_sdp_v1_3 = false;
8547 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
8548 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
8549 mutex_init(&aconnector->hpd_lock);
8550 mutex_init(&aconnector->handle_mst_msg_ready);
8551
8552 /*
8553 * Configure HPD hot plug support. connector->polled defaults to 0,
8554 * which means HPD hot plug is not supported.
8555 */
8556 switch (connector_type) {
8557 case DRM_MODE_CONNECTOR_HDMIA:
8558 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8559 aconnector->base.ycbcr_420_allowed =
8560 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8561 break;
8562 case DRM_MODE_CONNECTOR_DisplayPort:
8563 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8564 link->link_enc = link_enc_cfg_get_link_enc(link);
8565 ASSERT(link->link_enc);
8566 if (link->link_enc)
8567 aconnector->base.ycbcr_420_allowed =
8568 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8569 break;
8570 case DRM_MODE_CONNECTOR_DVID:
8571 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8572 break;
8573 default:
8574 break;
8575 }
8576
8577 drm_object_attach_property(&aconnector->base.base,
8578 dm->ddev->mode_config.scaling_mode_property,
8579 DRM_MODE_SCALE_NONE);
8580
8581 if (connector_type == DRM_MODE_CONNECTOR_HDMIA
8582 || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root))
8583 drm_connector_attach_broadcast_rgb_property(&aconnector->base);
8584
8585 drm_object_attach_property(&aconnector->base.base,
8586 adev->mode_info.underscan_property,
8587 UNDERSCAN_OFF);
8588 drm_object_attach_property(&aconnector->base.base,
8589 adev->mode_info.underscan_hborder_property,
8590 0);
8591 drm_object_attach_property(&aconnector->base.base,
8592 adev->mode_info.underscan_vborder_property,
8593 0);
8594
8595 if (!aconnector->mst_root)
8596 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8597
8598 aconnector->base.state->max_bpc = 16;
8599 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8600
8601 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
8602 /* Content Type is currently only implemented for HDMI. */
8603 drm_connector_attach_content_type_property(&aconnector->base);
8604 }
8605
8606 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
8607 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
8608 drm_connector_attach_colorspace_property(&aconnector->base);
8609 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
8610 connector_type == DRM_MODE_CONNECTOR_eDP) {
8611 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
8612 drm_connector_attach_colorspace_property(&aconnector->base);
8613 }
8614
8615 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8616 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8617 connector_type == DRM_MODE_CONNECTOR_eDP) {
8618 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8619
8620 if (!aconnector->mst_root)
8621 drm_connector_attach_vrr_capable_property(&aconnector->base);
8622
8623 if (adev->dm.hdcp_workqueue)
8624 drm_connector_attach_content_protection_property(&aconnector->base, true);
8625 }
8626
8627 if (connector_type == DRM_MODE_CONNECTOR_eDP) {
8628 struct drm_privacy_screen *privacy_screen;
8629
8630 privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
8631 if (!IS_ERR(privacy_screen)) {
8632 drm_connector_attach_privacy_screen_provider(&aconnector->base,
8633 privacy_screen);
8634 } else if (PTR_ERR(privacy_screen) != -ENODEV) {
8635 drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
8636 }
8637 }
8638 }
8639
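/*
 * Translate a Linux i2c_msg transfer into a DC i2c_command and submit it
 * over the link's DDC engine (or the OEM engine for OEM adapters).
 */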
8640 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8641 struct i2c_msg *msgs, int num)
8642 {
8643 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8644 struct ddc_service *ddc_service = i2c->ddc_service;
8645 struct i2c_command cmd;
8646 int i;
8647 int result = -EIO;
8648
8649 if (!ddc_service->ddc_pin)
8650 return result;
8651
8652 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8653
8654 if (!cmd.payloads)
8655 return result;
8656
8657 cmd.number_of_payloads = num;
8658 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8659 cmd.speed = 100;
8660
8661 for (i = 0; i < num; i++) {
8662 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8663 cmd.payloads[i].address = msgs[i].addr;
8664 cmd.payloads[i].length = msgs[i].len;
8665 cmd.payloads[i].data = msgs[i].buf;
8666 }
8667
8668 if (i2c->oem) {
8669 if (dc_submit_i2c_oem(
8670 ddc_service->ctx->dc,
8671 &cmd))
8672 result = num;
8673 } else {
8674 if (dc_submit_i2c(
8675 ddc_service->ctx->dc,
8676 ddc_service->link->link_index,
8677 &cmd))
8678 result = num;
8679 }
8680
8681 kfree(cmd.payloads);
8682 return result;
8683 }
8684
8685 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8686 {
8687 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8688 }
8689
8690 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8691 .master_xfer = amdgpu_dm_i2c_xfer,
8692 .functionality = amdgpu_dm_i2c_func,
8693 };
8694
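/* Allocate and initialize an i2c adapter wrapping a DC DDC service. */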
8695 static struct amdgpu_i2c_adapter *
8696 create_i2c(struct ddc_service *ddc_service, bool oem)
8697 {
8698 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8699 struct amdgpu_i2c_adapter *i2c;
8700
8701 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8702 if (!i2c)
8703 return NULL;
8704 i2c->base.owner = THIS_MODULE;
8705 i2c->base.dev.parent = &adev->pdev->dev;
8706 i2c->base.algo = &amdgpu_dm_i2c_algo;
8707 if (oem)
8708 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus");
8709 else
8710 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d",
8711 ddc_service->link->link_index);
8712 i2c_set_adapdata(&i2c->base, i2c);
8713 i2c->ddc_service = ddc_service;
8714 i2c->oem = oem;
8715
8716 return i2c;
8717 }
8718
8719 int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
8720 {
8721 struct cec_connector_info conn_info;
8722 struct drm_device *ddev = aconnector->base.dev;
8723 struct device *hdmi_dev = ddev->dev;
8724
8725 if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
8726 drm_info(ddev, "HDMI-CEC feature masked\n");
8727 return -EINVAL;
8728 }
8729
8730 cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
8731 aconnector->notifier =
8732 cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
8733 if (!aconnector->notifier) {
8734 drm_err(ddev, "Failed to create cec notifier\n");
8735 return -ENOMEM;
8736 }
8737
8738 return 0;
8739 }
8740
8741 /*
8742 * Note: this function assumes that dc_link_detect() was called for the
8743 * dc_link which will be represented by this aconnector.
8744 */
8745 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8746 struct amdgpu_dm_connector *aconnector,
8747 u32 link_index,
8748 struct amdgpu_encoder *aencoder)
8749 {
8750 int res = 0;
8751 int connector_type;
8752 struct dc *dc = dm->dc;
8753 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8754 struct amdgpu_i2c_adapter *i2c;
8755
8756 /* Not needed for writeback connector */
8757 link->priv = aconnector;
8758
8759
8760 i2c = create_i2c(link->ddc, false);
8761 if (!i2c) {
8762 drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
8763 return -ENOMEM;
8764 }
8765
8766 aconnector->i2c = i2c;
8767 res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
8768
8769 if (res) {
8770 drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
8771 goto out_free;
8772 }
8773
8774 connector_type = to_drm_connector_type(link->connector_signal);
8775
8776 res = drm_connector_init_with_ddc(
8777 dm->ddev,
8778 &aconnector->base,
8779 &amdgpu_dm_connector_funcs,
8780 connector_type,
8781 &i2c->base);
8782
8783 if (res) {
8784 drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
8785 aconnector->connector_id = -1;
8786 goto out_free;
8787 }
8788
8789 drm_connector_helper_add(
8790 &aconnector->base,
8791 &amdgpu_dm_connector_helper_funcs);
8792
8793 amdgpu_dm_connector_init_helper(
8794 dm,
8795 aconnector,
8796 connector_type,
8797 link,
8798 link_index);
8799
8800 drm_connector_attach_encoder(
8801 &aconnector->base, &aencoder->base);
8802
8803 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8804 connector_type == DRM_MODE_CONNECTOR_HDMIB)
8805 amdgpu_dm_initialize_hdmi_connector(aconnector);
8806
8807 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8808 || connector_type == DRM_MODE_CONNECTOR_eDP)
8809 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8810
8811 out_free:
8812 if (res) {
8813 kfree(i2c);
8814 aconnector->i2c = NULL;
8815 }
8816 return res;
8817 }
8818
8819 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8820 {
8821 switch (adev->mode_info.num_crtc) {
8822 case 1:
8823 return 0x1;
8824 case 2:
8825 return 0x3;
8826 case 3:
8827 return 0x7;
8828 case 4:
8829 return 0xf;
8830 case 5:
8831 return 0x1f;
8832 case 6:
8833 default:
8834 return 0x3f;
8835 }
8836 }
8837
8838 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8839 struct amdgpu_encoder *aencoder,
8840 uint32_t link_index)
8841 {
8842 struct amdgpu_device *adev = drm_to_adev(dev);
8843
8844 int res = drm_encoder_init(dev,
8845 &aencoder->base,
8846 &amdgpu_dm_encoder_funcs,
8847 DRM_MODE_ENCODER_TMDS,
8848 NULL);
8849
8850 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8851
8852 if (!res)
8853 aencoder->encoder_id = link_index;
8854 else
8855 aencoder->encoder_id = -1;
8856
8857 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8858
8859 return res;
8860 }
8861
8862 static void manage_dm_interrupts(struct amdgpu_device *adev,
8863 struct amdgpu_crtc *acrtc,
8864 struct dm_crtc_state *acrtc_state)
8865 { /*
8866 * We cannot be sure that the frontend index maps to the same
8867 * backend index - some even map to more than one.
8868 * So we have to go through the CRTC to find the right IRQ.
8869 */
8870 int irq_type = amdgpu_display_crtc_idx_to_irq_type(
8871 adev,
8872 acrtc->crtc_id);
8873 struct drm_device *dev = adev_to_drm(adev);
8874
8875 struct drm_vblank_crtc_config config = {0};
8876 struct dc_crtc_timing *timing;
8877 int offdelay;
8878
8879 if (acrtc_state) {
8880 timing = &acrtc_state->stream->timing;
8881
8882 /*
8883 * Depending on when the HW latching event of double-buffered
8884 * registers happen relative to the PSR SDP deadline, and how
8885 * bad the Panel clock has drifted since the last ALPM off
8886 * event, there can be up to 3 frames of delay between sending
8887 * the PSR exit cmd to DMUB fw, and when the panel starts
8888 * displaying live frames.
8889 *
8890 * We can set:
8891 *
8892 * 20/100 * offdelay_ms = 3_frames_ms
8893 * => offdelay_ms = 5 * 3_frames_ms
8894 *
8895 * This ensures that `3_frames_ms` will only be experienced as a
8896 * 20% delay on top of how long the display has been static, and
8897 * thus makes the delay less perceivable.
8898 */
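/*
 * For example, a 1920x1080 timing with h_total 2200, v_total 1125 and a
 * 148.5 MHz pixel clock has a frame time of about 16.7 ms, giving
 * offdelay_ms = 5 * 3 * 16.7, roughly 250 ms.
 */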
8899 if (acrtc_state->stream->link->psr_settings.psr_version <
8900 DC_PSR_VERSION_UNSUPPORTED) {
8901 offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 *
8902 timing->v_total *
8903 timing->h_total,
8904 timing->pix_clk_100hz);
8905 config.offdelay_ms = offdelay ?: 30;
8906 } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
8907 IP_VERSION(3, 5, 0) ||
8908 !(adev->flags & AMD_IS_APU)) {
8909 /*
8910 * Older HW and DGPU have issues with instant off;
8911 * use a 2 frame offdelay.
8912 */
8913 offdelay = DIV64_U64_ROUND_UP((u64)20 *
8914 timing->v_total *
8915 timing->h_total,
8916 timing->pix_clk_100hz);
8917
8918 config.offdelay_ms = offdelay ?: 30;
8919 } else {
8920 /* offdelay_ms = 0 will never disable vblank */
8921 config.offdelay_ms = 1;
8922 config.disable_immediate = true;
8923 }
8924
8925 drm_crtc_vblank_on_config(&acrtc->base,
8926 &config);
8927 /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
8928 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
8929 case IP_VERSION(3, 0, 0):
8930 case IP_VERSION(3, 0, 2):
8931 case IP_VERSION(3, 0, 3):
8932 case IP_VERSION(3, 2, 0):
8933 if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
8934 drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
8935 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8936 if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
8937 drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
8938 #endif
8939 }
8940
8941 } else {
8942 /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
8943 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
8944 case IP_VERSION(3, 0, 0):
8945 case IP_VERSION(3, 0, 2):
8946 case IP_VERSION(3, 0, 3):
8947 case IP_VERSION(3, 2, 0):
8948 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8949 if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
8950 drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
8951 #endif
8952 if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
8953 drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
8954 }
8955
8956 drm_crtc_vblank_off(&acrtc->base);
8957 }
8958 }
8959
8960 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8961 struct amdgpu_crtc *acrtc)
8962 {
8963 int irq_type =
8964 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8965
8966 /**
8967 * This reads the current state for the IRQ and forces a reapply
8968 * of the setting to hardware.
8969 */
8970 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8971 }
8972
8973 static bool
8974 is_scaling_state_different(const struct dm_connector_state *dm_state,
8975 const struct dm_connector_state *old_dm_state)
8976 {
8977 if (dm_state->scaling != old_dm_state->scaling)
8978 return true;
8979 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8980 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8981 return true;
8982 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8983 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8984 return true;
8985 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8986 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8987 return true;
8988 return false;
8989 }
8990
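/*
 * Decide whether HDCP needs to be (re)enabled for this connector based on
 * the old/new content protection property values and CRTC state changes.
 */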
8991 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
8992 struct drm_crtc_state *old_crtc_state,
8993 struct drm_connector_state *new_conn_state,
8994 struct drm_connector_state *old_conn_state,
8995 const struct drm_connector *connector,
8996 struct hdcp_workqueue *hdcp_w)
8997 {
8998 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8999 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
9000
9001 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
9002 connector->index, connector->status, connector->dpms);
9003 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
9004 old_conn_state->content_protection, new_conn_state->content_protection);
9005
9006 if (old_crtc_state)
9007 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9008 old_crtc_state->enable,
9009 old_crtc_state->active,
9010 old_crtc_state->mode_changed,
9011 old_crtc_state->active_changed,
9012 old_crtc_state->connectors_changed);
9013
9014 if (new_crtc_state)
9015 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9016 new_crtc_state->enable,
9017 new_crtc_state->active,
9018 new_crtc_state->mode_changed,
9019 new_crtc_state->active_changed,
9020 new_crtc_state->connectors_changed);
9021
9022 /* hdcp content type change */
9023 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
9024 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
9025 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9026 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
9027 return true;
9028 }
9029
9030 /* CP is being re-enabled, ignore this */
9031 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
9032 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9033 if (new_crtc_state && new_crtc_state->mode_changed) {
9034 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9035 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
9036 return true;
9037 }
9038 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
9039 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
9040 return false;
9041 }
9042
9043 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9044 *
9045 * Handles: UNDESIRED -> ENABLED
9046 */
9047 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9048 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9049 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9050
9051 /* Stream removed and re-enabled
9052 *
9053 * Can sometimes overlap with the HPD case,
9054 * thus set update_hdcp to false to avoid
9055 * setting HDCP multiple times.
9056 *
9057 * Handles: DESIRED -> DESIRED (Special case)
9058 */
9059 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
9060 new_conn_state->crtc && new_conn_state->crtc->enabled &&
9061 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9062 dm_con_state->update_hdcp = false;
9063 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
9064 __func__);
9065 return true;
9066 }
9067
9068 /* Hot-plug, headless s3, dpms
9069 *
9070 * Only start HDCP if the display is connected/enabled.
9071 * update_hdcp flag will be set to false until the next
9072 * HPD comes in.
9073 *
9074 * Handles: DESIRED -> DESIRED (Special case)
9075 */
9076 if (dm_con_state->update_hdcp &&
9077 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9078 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9079 dm_con_state->update_hdcp = false;
9080 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
9081 __func__);
9082 return true;
9083 }
9084
9085 if (old_conn_state->content_protection == new_conn_state->content_protection) {
9086 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9087 if (new_crtc_state && new_crtc_state->mode_changed) {
9088 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
9089 __func__);
9090 return true;
9091 }
9092 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
9093 __func__);
9094 return false;
9095 }
9096
9097 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
9098 return false;
9099 }
9100
9101 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9102 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
9103 __func__);
9104 return true;
9105 }
9106
9107 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
9108 return false;
9109 }
9110
9111 static void remove_stream(struct amdgpu_device *adev,
9112 struct amdgpu_crtc *acrtc,
9113 struct dc_stream_state *stream)
9114 {
9115 /* this is the update mode case */
9116
9117 acrtc->otg_inst = -1;
9118 acrtc->enabled = false;
9119 }
9120
9121 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9122 {
9123
9124 assert_spin_locked(&acrtc->base.dev->event_lock);
9125 WARN_ON(acrtc->event);
9126
9127 acrtc->event = acrtc->base.state->event;
9128
9129 /* Set the flip status */
9130 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9131
9132 /* Mark this event as consumed */
9133 acrtc->base.state->event = NULL;
9134
9135 drm_dbg_state(acrtc->base.dev,
9136 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9137 acrtc->crtc_id);
9138 }
9139
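/*
 * Recompute the VRR parameters for the upcoming flip and rebuild the
 * FreeSync/adaptive-sync infopackets on the stream accordingly.
 */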
9140 static void update_freesync_state_on_stream(
9141 struct amdgpu_display_manager *dm,
9142 struct dm_crtc_state *new_crtc_state,
9143 struct dc_stream_state *new_stream,
9144 struct dc_plane_state *surface,
9145 u32 flip_timestamp_in_us)
9146 {
9147 struct mod_vrr_params vrr_params;
9148 struct dc_info_packet vrr_infopacket = {0};
9149 struct amdgpu_device *adev = dm->adev;
9150 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9151 unsigned long flags;
9152 bool pack_sdp_v1_3 = false;
9153 struct amdgpu_dm_connector *aconn;
9154 enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
9155
9156 if (!new_stream)
9157 return;
9158
9159 /*
9160 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9161 * For now it's sufficient to just guard against these conditions.
9162 */
9163
9164 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9165 return;
9166
9167 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9168 vrr_params = acrtc->dm_irq_params.vrr_params;
9169
9170 if (surface) {
9171 mod_freesync_handle_preflip(
9172 dm->freesync_module,
9173 surface,
9174 new_stream,
9175 flip_timestamp_in_us,
9176 &vrr_params);
9177
9178 if (adev->family < AMDGPU_FAMILY_AI &&
9179 amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
9180 mod_freesync_handle_v_update(dm->freesync_module,
9181 new_stream, &vrr_params);
9182
9183 /* Need to call this before the frame ends. */
9184 dc_stream_adjust_vmin_vmax(dm->dc,
9185 new_crtc_state->stream,
9186 &vrr_params.adjust);
9187 }
9188 }
9189
9190 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
9191
9192 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
9193 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
9194
9195 if (aconn->vsdb_info.amd_vsdb_version == 1)
9196 packet_type = PACKET_TYPE_FS_V1;
9197 else if (aconn->vsdb_info.amd_vsdb_version == 2)
9198 packet_type = PACKET_TYPE_FS_V2;
9199 else if (aconn->vsdb_info.amd_vsdb_version == 3)
9200 packet_type = PACKET_TYPE_FS_V3;
9201
9202 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
9203 &new_stream->adaptive_sync_infopacket);
9204 }
9205
9206 mod_freesync_build_vrr_infopacket(
9207 dm->freesync_module,
9208 new_stream,
9209 &vrr_params,
9210 packet_type,
9211 TRANSFER_FUNC_UNKNOWN,
9212 &vrr_infopacket,
9213 pack_sdp_v1_3);
9214
9215 new_crtc_state->freesync_vrr_info_changed |=
9216 (memcmp(&new_crtc_state->vrr_infopacket,
9217 &vrr_infopacket,
9218 sizeof(vrr_infopacket)) != 0);
9219
9220 acrtc->dm_irq_params.vrr_params = vrr_params;
9221 new_crtc_state->vrr_infopacket = vrr_infopacket;
9222
9223 new_stream->vrr_infopacket = vrr_infopacket;
9224 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
9225
9226 if (new_crtc_state->freesync_vrr_info_changed)
9227 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9228 new_crtc_state->base.crtc->base.id,
9229 (int)new_crtc_state->base.vrr_enabled,
9230 (int)vrr_params.state);
9231
9232 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9233 }
9234
9235 static void update_stream_irq_parameters(
9236 struct amdgpu_display_manager *dm,
9237 struct dm_crtc_state *new_crtc_state)
9238 {
9239 struct dc_stream_state *new_stream = new_crtc_state->stream;
9240 struct mod_vrr_params vrr_params;
9241 struct mod_freesync_config config = new_crtc_state->freesync_config;
9242 struct amdgpu_device *adev = dm->adev;
9243 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9244 unsigned long flags;
9245
9246 if (!new_stream)
9247 return;
9248
9249 /*
9250 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9251 * For now it's sufficient to just guard against these conditions.
9252 */
9253 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9254 return;
9255
9256 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9257 vrr_params = acrtc->dm_irq_params.vrr_params;
9258
9259 if (new_crtc_state->vrr_supported &&
9260 config.min_refresh_in_uhz &&
9261 config.max_refresh_in_uhz) {
9262 /*
9263 * if freesync compatible mode was set, config.state will be set
9264 * in atomic check
9265 */
9266 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9267 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9268 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9269 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9270 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9271 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9272 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9273 } else {
9274 config.state = new_crtc_state->base.vrr_enabled ?
9275 VRR_STATE_ACTIVE_VARIABLE :
9276 VRR_STATE_INACTIVE;
9277 }
9278 } else {
9279 config.state = VRR_STATE_UNSUPPORTED;
9280 }
9281
9282 mod_freesync_build_vrr_params(dm->freesync_module,
9283 new_stream,
9284 &config, &vrr_params);
9285
9286 new_crtc_state->freesync_config = config;
9287 /* Copy state for access from DM IRQ handler */
9288 acrtc->dm_irq_params.freesync_config = config;
9289 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9290 acrtc->dm_irq_params.vrr_params = vrr_params;
9291 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9292 }
9293
9294 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9295 struct dm_crtc_state *new_state)
9296 {
9297 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
9298 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
9299
9300 if (!old_vrr_active && new_vrr_active) {
9301 /* Transition VRR inactive -> active:
9302 * While VRR is active, we must not disable vblank irq, as a
9303 * reenable after disable would compute bogus vblank/pflip
9304 * timestamps if it likely happened inside display front-porch.
9305 *
9306 * We also need vupdate irq for the actual core vblank handling
9307 * at end of vblank.
9308 */
9309 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
9310 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
9311 drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
9312 __func__, new_state->base.crtc->base.id);
9313 } else if (old_vrr_active && !new_vrr_active) {
9314 /* Transition VRR active -> inactive:
9315 * Allow vblank irq disable again for fixed refresh rate.
9316 */
9317 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
9318 drm_crtc_vblank_put(new_state->base.crtc);
9319 drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
9320 __func__, new_state->base.crtc->base.id);
9321 }
9322 }
9323
9324 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9325 {
9326 struct drm_plane *plane;
9327 struct drm_plane_state *old_plane_state;
9328 int i;
9329
9330 /*
9331 * TODO: Make this per-stream so we don't issue redundant updates for
9332 * commits with multiple streams.
9333 */
9334 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9335 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9336 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
9337 }
9338
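/* Return the TTM memory type (VRAM/GTT) backing a framebuffer's BO. */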
9339 static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
9340 {
9341 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
9342
9343 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
9344 }
9345
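/*
 * Program the hardware cursor position and attributes for a native-mode
 * cursor plane as part of a stream update.
 */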
9346 static void amdgpu_dm_update_cursor(struct drm_plane *plane,
9347 struct drm_plane_state *old_plane_state,
9348 struct dc_stream_update *update)
9349 {
9350 struct amdgpu_device *adev = drm_to_adev(plane->dev);
9351 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9352 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9353 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9354 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9355 uint64_t address = afb ? afb->address : 0;
9356 struct dc_cursor_position position = {0};
9357 struct dc_cursor_attributes attributes;
9358 int ret;
9359
9360 if (!plane->state->fb && !old_plane_state->fb)
9361 return;
9362
9363 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
9364 amdgpu_crtc->crtc_id, plane->state->crtc_w,
9365 plane->state->crtc_h);
9366
9367 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
9368 if (ret)
9369 return;
9370
9371 if (!position.enable) {
9372 /* turn off cursor */
9373 if (crtc_state && crtc_state->stream) {
9374 dc_stream_set_cursor_position(crtc_state->stream,
9375 &position);
9376 update->cursor_position = &crtc_state->stream->cursor_position;
9377 }
9378 return;
9379 }
9380
9381 amdgpu_crtc->cursor_width = plane->state->crtc_w;
9382 amdgpu_crtc->cursor_height = plane->state->crtc_h;
9383
9384 memset(&attributes, 0, sizeof(attributes));
9385 attributes.address.high_part = upper_32_bits(address);
9386 attributes.address.low_part = lower_32_bits(address);
9387 attributes.width = plane->state->crtc_w;
9388 attributes.height = plane->state->crtc_h;
9389 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9390 attributes.rotation_angle = 0;
9391 attributes.attribute_flags.value = 0;
9392
9393 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
9394 * legacy gamma setup.
9395 */
9396 if (crtc_state->cm_is_degamma_srgb &&
9397 adev->dm.dc->caps.color.dpp.gamma_corr)
9398 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
9399
9400 if (afb)
9401 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9402
9403 if (crtc_state->stream) {
9404 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9405 &attributes))
9406 drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
9407
9408 update->cursor_attributes = &crtc_state->stream->cursor_attributes;
9409
9410 if (!dc_stream_set_cursor_position(crtc_state->stream,
9411 &position))
9412 drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
9413
9414 update->cursor_position = &crtc_state->stream->cursor_position;
9415 }
9416 }
9417
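/*
 * Set up PSR or Panel Replay for the stream and enable self-refresh once
 * fast updates and the dirty-rect timestamp indicate it is safe to do so.
 */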
9418 static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
9419 const struct dm_crtc_state *acrtc_state,
9420 const u64 current_ts)
9421 {
9422 struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
9423 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
9424 struct amdgpu_dm_connector *aconn =
9425 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9426 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
9427
9428 if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
9429 if (pr->config.replay_supported && !pr->replay_feature_enabled)
9430 amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
9431 else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9432 !psr->psr_feature_enabled)
9433 if (!aconn->disallow_edp_enter_psr)
9434 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9435 }
9436
9437 /* Decrement skip count when SR is enabled and we're doing fast updates. */
9438 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9439 (psr->psr_feature_enabled || pr->config.replay_supported)) {
9440 if (aconn->sr_skip_count > 0)
9441 aconn->sr_skip_count--;
9442
9443 /* Allow SR when skip count is 0. */
9444 acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
9445
9446 /*
9447 * If the sink supports PSR SU/Panel Replay, there is no need to rely
9448 * on a vblank event disable request to enable PSR/RP. PSR SU/RP
9449 * can be enabled immediately once the OS demonstrates an
9450 * adequate number of fast atomic commits to notify KMD
9451 * of update events. See `vblank_control_worker()`.
9452 */
9453 if (!vrr_active &&
9454 acrtc_attach->dm_irq_params.allow_sr_entry &&
9455 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
9456 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
9457 #endif
9458 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
9459 if (pr->replay_feature_enabled && !pr->replay_allow_active)
9460 amdgpu_dm_replay_enable(acrtc_state->stream, true);
9461 if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
9462 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
9463 amdgpu_dm_psr_enable(acrtc_state->stream);
9464 }
9465 } else {
9466 acrtc_attach->dm_irq_params.allow_sr_entry = false;
9467 }
9468 }
9469
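/*
 * Program all plane updates for one CRTC: build the dc_surface_update
 * bundle, handle cursor and flip bookkeeping, then hand the bundle to DC.
 */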
9470 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9471 struct drm_device *dev,
9472 struct amdgpu_display_manager *dm,
9473 struct drm_crtc *pcrtc,
9474 bool wait_for_vblank)
9475 {
9476 u32 i;
9477 u64 timestamp_ns = ktime_get_ns();
9478 struct drm_plane *plane;
9479 struct drm_plane_state *old_plane_state, *new_plane_state;
9480 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9481 struct drm_crtc_state *new_pcrtc_state =
9482 drm_atomic_get_new_crtc_state(state, pcrtc);
9483 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9484 struct dm_crtc_state *dm_old_crtc_state =
9485 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9486 int planes_count = 0, vpos, hpos;
9487 unsigned long flags;
9488 u32 target_vblank, last_flip_vblank;
9489 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
9490 bool cursor_update = false;
9491 bool pflip_present = false;
9492 bool dirty_rects_changed = false;
9493 bool updated_planes_and_streams = false;
9494 struct {
9495 struct dc_surface_update surface_updates[MAX_SURFACES];
9496 struct dc_plane_info plane_infos[MAX_SURFACES];
9497 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9498 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9499 struct dc_stream_update stream_update;
9500 } *bundle;
9501
9502 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9503
9504 if (!bundle) {
9505 drm_err(dev, "Failed to allocate update bundle\n");
9506 goto cleanup;
9507 }
9508
9509 /*
9510 * Disable the cursor first if we're disabling all the planes.
9511 * It'll remain on the screen after the planes are re-enabled
9512 * if we don't.
9513 *
9514 * If the cursor is transitioning from native to overlay mode, the
9515 * native cursor needs to be disabled first.
9516 */
9517 if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
9518 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
9519 struct dc_cursor_position cursor_position = {0};
9520
9521 if (!dc_stream_set_cursor_position(acrtc_state->stream,
9522 &cursor_position))
9523 drm_err(dev, "DC failed to disable native cursor\n");
9524
9525 bundle->stream_update.cursor_position =
9526 &acrtc_state->stream->cursor_position;
9527 }
9528
9529 if (acrtc_state->active_planes == 0 &&
9530 dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
9531 amdgpu_dm_commit_cursors(state);
9532
9533 /* update planes when needed */
9534 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9535 struct drm_crtc *crtc = new_plane_state->crtc;
9536 struct drm_crtc_state *new_crtc_state;
9537 struct drm_framebuffer *fb = new_plane_state->fb;
9538 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9539 bool plane_needs_flip;
9540 struct dc_plane_state *dc_plane;
9541 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9542
9543 /* Cursor plane is handled after stream updates */
9544 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
9545 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
9546 if ((fb && crtc == pcrtc) ||
9547 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) {
9548 cursor_update = true;
9549 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0)
9550 amdgpu_dm_update_cursor(plane, old_plane_state, &bundle->stream_update);
9551 }
9552
9553 continue;
9554 }
9555
9556 if (!fb || !crtc || pcrtc != crtc)
9557 continue;
9558
9559 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9560 if (!new_crtc_state->active)
9561 continue;
9562
9563 dc_plane = dm_new_plane_state->dc_state;
9564 if (!dc_plane)
9565 continue;
9566
9567 bundle->surface_updates[planes_count].surface = dc_plane;
9568 if (new_pcrtc_state->color_mgmt_changed) {
9569 bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
9570 bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
9571 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9572 bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
9573 bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
9574 bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
9575 bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
9576 }
9577
9578 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
9579 &bundle->scaling_infos[planes_count]);
9580
9581 bundle->surface_updates[planes_count].scaling_info =
9582 &bundle->scaling_infos[planes_count];
9583
9584 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9585
9586 pflip_present = pflip_present || plane_needs_flip;
9587
9588 if (!plane_needs_flip) {
9589 planes_count += 1;
9590 continue;
9591 }
9592
9593 fill_dc_plane_info_and_addr(
9594 dm->adev, new_plane_state,
9595 afb->tiling_flags,
9596 &bundle->plane_infos[planes_count],
9597 &bundle->flip_addrs[planes_count].address,
9598 afb->tmz_surface);
9599
9600 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9601 new_plane_state->plane->index,
9602 bundle->plane_infos[planes_count].dcc.enable);
9603
9604 bundle->surface_updates[planes_count].plane_info =
9605 &bundle->plane_infos[planes_count];
9606
9607 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
9608 acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
9609 fill_dc_dirty_rects(plane, old_plane_state,
9610 new_plane_state, new_crtc_state,
9611 &bundle->flip_addrs[planes_count],
9612 acrtc_state->stream->link->psr_settings.psr_version ==
9613 DC_PSR_VERSION_SU_1,
9614 &dirty_rects_changed);
9615
9616 /*
9617 * If the dirty regions changed, PSR-SU needs to be disabled temporarily
9618 * and re-enabled once the dirty regions are stable, to avoid video glitches.
9619 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses
9620 * the video while PSR-SU is disabled.
9621 */
9622 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9623 acrtc_attach->dm_irq_params.allow_sr_entry &&
9624 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
9625 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
9626 #endif
9627 dirty_rects_changed) {
9628 mutex_lock(&dm->dc_lock);
9629 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
9630 timestamp_ns;
9631 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
9632 amdgpu_dm_psr_disable(acrtc_state->stream, true);
9633 mutex_unlock(&dm->dc_lock);
9634 }
9635 }
9636
9637 /*
9638 * Only allow immediate flips for fast updates that don't
9639 * change memory domain, FB pitch, DCC state, rotation or
9640 * mirroring.
9641 *
9642 * dm_crtc_helper_atomic_check() only accepts async flips with
9643 * fast updates.
9644 */
9645 if (crtc->state->async_flip &&
9646 (acrtc_state->update_type != UPDATE_TYPE_FAST ||
9647 get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
9648 drm_warn_once(state->dev,
9649 "[PLANE:%d:%s] async flip with non-fast update\n",
9650 plane->base.id, plane->name);
9651
9652 bundle->flip_addrs[planes_count].flip_immediate =
9653 crtc->state->async_flip &&
9654 acrtc_state->update_type == UPDATE_TYPE_FAST &&
9655 get_mem_type(old_plane_state->fb) == get_mem_type(fb);
9656
9657 timestamp_ns = ktime_get_ns();
9658 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9659 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9660 bundle->surface_updates[planes_count].surface = dc_plane;
9661
9662 if (!bundle->surface_updates[planes_count].surface) {
9663 drm_err(dev, "No surface for CRTC: id=%d\n",
9664 acrtc_attach->crtc_id);
9665 continue;
9666 }
9667
9668 if (plane == pcrtc->primary)
9669 update_freesync_state_on_stream(
9670 dm,
9671 acrtc_state,
9672 acrtc_state->stream,
9673 dc_plane,
9674 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9675
9676 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9677 __func__,
9678 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9679 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9680
9681 planes_count += 1;
9682
9683 }
9684
9685 if (pflip_present) {
9686 if (!vrr_active) {
9687 /* Use old throttling in non-vrr fixed refresh rate mode
9688 * to keep flip scheduling based on target vblank counts
9689 * working in a backwards compatible way, e.g., for
9690 * clients using the GLX_OML_sync_control extension or
9691 * DRI3/Present extension with defined target_msc.
9692 */
9693 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9694 } else {
9695 /* For variable refresh rate mode only:
9696 * Get vblank of last completed flip to avoid > 1 vrr
9697 * flips per video frame by use of throttling, but allow
9698 * flip programming anywhere in the possibly large
9699 * variable vrr vblank interval for fine-grained flip
9700 * timing control and more opportunity to avoid stutter
9701 * on late submission of flips.
9702 */
9703 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9704 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9705 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9706 }
9707
9708 target_vblank = last_flip_vblank + wait_for_vblank;
9709
9710 /*
9711 * Wait until we're out of the vertical blank period before the one
9712 * targeted by the flip
9713 */
9714 while ((acrtc_attach->enabled &&
9715 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9716 0, &vpos, &hpos, NULL,
9717 NULL, &pcrtc->hwmode)
9718 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9719 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9720 (int)(target_vblank -
9721 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9722 usleep_range(1000, 1100);
9723 }
9724
9725 /**
9726 * Prepare the flip event for the pageflip interrupt to handle.
9727 *
9728 * This only works in the case where we've already turned on the
9729 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9730 * from 0 -> n planes we have to skip a hardware generated event
9731 * and rely on sending it from software.
9732 */
9733 if (acrtc_attach->base.state->event &&
9734 acrtc_state->active_planes > 0) {
9735 drm_crtc_vblank_get(pcrtc);
9736
9737 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9738
9739 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9740 prepare_flip_isr(acrtc_attach);
9741
9742 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9743 }
9744
9745 if (acrtc_state->stream) {
9746 if (acrtc_state->freesync_vrr_info_changed)
9747 bundle->stream_update.vrr_infopacket =
9748 &acrtc_state->stream->vrr_infopacket;
9749 }
9750 } else if (cursor_update && acrtc_state->active_planes > 0) {
9751 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9752 if (acrtc_attach->base.state->event) {
9753 drm_crtc_vblank_get(pcrtc);
9754 acrtc_attach->event = acrtc_attach->base.state->event;
9755 acrtc_attach->base.state->event = NULL;
9756 }
9757 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9758 }
9759
9760 /* Update the planes if changed or disable if we don't have any. */
9761 if ((planes_count || acrtc_state->active_planes == 0) &&
9762 acrtc_state->stream) {
9763 /*
9764 * If PSR or idle optimizations are enabled then flush out
9765 * any pending work before hardware programming.
9766 */
9767 if (dm->vblank_control_workqueue)
9768 flush_workqueue(dm->vblank_control_workqueue);
9769
9770 bundle->stream_update.stream = acrtc_state->stream;
9771 if (new_pcrtc_state->mode_changed) {
9772 bundle->stream_update.src = acrtc_state->stream->src;
9773 bundle->stream_update.dst = acrtc_state->stream->dst;
9774 }
9775
9776 if (new_pcrtc_state->color_mgmt_changed) {
9777 /*
9778 * TODO: This isn't fully correct since we've actually
9779 * already modified the stream in place.
9780 */
9781 bundle->stream_update.gamut_remap =
9782 &acrtc_state->stream->gamut_remap_matrix;
9783 bundle->stream_update.output_csc_transform =
9784 &acrtc_state->stream->csc_color_matrix;
9785 bundle->stream_update.out_transfer_func =
9786 &acrtc_state->stream->out_transfer_func;
9787 bundle->stream_update.lut3d_func =
9788 (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
9789 bundle->stream_update.func_shaper =
9790 (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
9791 }
9792
9793 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9794 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9795 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9796
9797 mutex_lock(&dm->dc_lock);
9798 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
9799 if (acrtc_state->stream->link->replay_settings.replay_allow_active)
9800 amdgpu_dm_replay_disable(acrtc_state->stream);
9801 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
9802 amdgpu_dm_psr_disable(acrtc_state->stream, true);
9803 }
9804 mutex_unlock(&dm->dc_lock);
9805
9806 /*
9807 * If FreeSync state on the stream has changed then we need to
9808 * re-adjust the min/max bounds now that DC doesn't handle this
9809 * as part of commit.
9810 */
9811 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9812 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9813 dc_stream_adjust_vmin_vmax(
9814 dm->dc, acrtc_state->stream,
9815 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9816 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9817 }
9818 mutex_lock(&dm->dc_lock);
9819 update_planes_and_stream_adapter(dm->dc,
9820 acrtc_state->update_type,
9821 planes_count,
9822 acrtc_state->stream,
9823 &bundle->stream_update,
9824 bundle->surface_updates);
9825 updated_planes_and_streams = true;
9826
9827 /**
9828 * Enable or disable the interrupts on the backend.
9829 *
9830 * Most pipes are put into power gating when unused.
9831 *
9832 	 * When a pipe is power gated we lose its interrupt
9833 	 * enablement state once power gating is released again.
9834 *
9835 * So we need to update the IRQ control state in hardware
9836 * whenever the pipe turns on (since it could be previously
9837 * power gated) or off (since some pipes can't be power gated
9838 * on some ASICs).
9839 */
9840 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9841 dm_update_pflip_irq_state(drm_to_adev(dev),
9842 acrtc_attach);
9843
9844 amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
9845 mutex_unlock(&dm->dc_lock);
9846 }
9847
9848 /*
9849 * Update cursor state *after* programming all the planes.
9850 * This avoids redundant programming in the case where we're going
9851 * to be disabling a single plane - those pipes are being disabled.
9852 */
9853 if (acrtc_state->active_planes &&
9854 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
9855 acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
9856 amdgpu_dm_commit_cursors(state);
9857
9858 cleanup:
9859 kfree(bundle);
9860 }
9861
9862 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9863 struct drm_atomic_state *state)
9864 {
9865 struct amdgpu_device *adev = drm_to_adev(dev);
9866 struct amdgpu_dm_connector *aconnector;
9867 struct drm_connector *connector;
9868 struct drm_connector_state *old_con_state, *new_con_state;
9869 struct drm_crtc_state *new_crtc_state;
9870 struct dm_crtc_state *new_dm_crtc_state;
9871 const struct dc_stream_status *status;
9872 int i, inst;
9873
9874 /* Notify device removals. */
9875 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9876 if (old_con_state->crtc != new_con_state->crtc) {
9877 /* CRTC changes require notification. */
9878 goto notify;
9879 }
9880
9881 if (!new_con_state->crtc)
9882 continue;
9883
9884 new_crtc_state = drm_atomic_get_new_crtc_state(
9885 state, new_con_state->crtc);
9886
9887 if (!new_crtc_state)
9888 continue;
9889
9890 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9891 continue;
9892
9893 notify:
9894 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9895 continue;
9896
9897 aconnector = to_amdgpu_dm_connector(connector);
9898
9899 mutex_lock(&adev->dm.audio_lock);
9900 inst = aconnector->audio_inst;
9901 aconnector->audio_inst = -1;
9902 mutex_unlock(&adev->dm.audio_lock);
9903
9904 amdgpu_dm_audio_eld_notify(adev, inst);
9905 }
9906
9907 /* Notify audio device additions. */
9908 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9909 if (!new_con_state->crtc)
9910 continue;
9911
9912 new_crtc_state = drm_atomic_get_new_crtc_state(
9913 state, new_con_state->crtc);
9914
9915 if (!new_crtc_state)
9916 continue;
9917
9918 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9919 continue;
9920
9921 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9922 if (!new_dm_crtc_state->stream)
9923 continue;
9924
9925 status = dc_stream_get_status(new_dm_crtc_state->stream);
9926 if (!status)
9927 continue;
9928
9929 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9930 continue;
9931
9932 aconnector = to_amdgpu_dm_connector(connector);
9933
9934 mutex_lock(&adev->dm.audio_lock);
9935 inst = status->audio_inst;
9936 aconnector->audio_inst = inst;
9937 mutex_unlock(&adev->dm.audio_lock);
9938
9939 amdgpu_dm_audio_eld_notify(adev, inst);
9940 }
9941 }
9942
9943 /*
9944 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9945 * @crtc_state: the DRM CRTC state
9946 * @stream_state: the DC stream state.
9947 *
9948  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9949 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9950 */
9951 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9952 struct dc_stream_state *stream_state)
9953 {
9954 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9955 }
9956
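/* Tear down writeback for the CRTC: remove dwb instance 0 from its stream. */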
9957 static void dm_clear_writeback(struct amdgpu_display_manager *dm,
9958 struct dm_crtc_state *crtc_state)
9959 {
9960 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
9961 }
9962
9963 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
9964 struct dc_state *dc_state)
9965 {
9966 struct drm_device *dev = state->dev;
9967 struct amdgpu_device *adev = drm_to_adev(dev);
9968 struct amdgpu_display_manager *dm = &adev->dm;
9969 struct drm_crtc *crtc;
9970 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9971 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9972 struct drm_connector_state *old_con_state;
9973 struct drm_connector *connector;
9974 bool mode_set_reset_required = false;
9975 u32 i;
9976 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
9977 bool set_backlight_level = false;
9978
9979 /* Disable writeback */
9980 for_each_old_connector_in_state(state, connector, old_con_state, i) {
9981 struct dm_connector_state *dm_old_con_state;
9982 struct amdgpu_crtc *acrtc;
9983
9984 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
9985 continue;
9986
9987 old_crtc_state = NULL;
9988
9989 dm_old_con_state = to_dm_connector_state(old_con_state);
9990 if (!dm_old_con_state->base.crtc)
9991 continue;
9992
9993 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
9994 if (acrtc)
9995 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9996
9997 if (!acrtc || !acrtc->wb_enabled)
9998 continue;
9999
10000 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10001
10002 dm_clear_writeback(dm, dm_old_crtc_state);
10003 acrtc->wb_enabled = false;
10004 }
10005
10006 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10007 new_crtc_state, i) {
10008 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10009
10010 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10011
10012 if (old_crtc_state->active &&
10013 (!new_crtc_state->active ||
10014 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10015 manage_dm_interrupts(adev, acrtc, NULL);
10016 dc_stream_release(dm_old_crtc_state->stream);
10017 }
10018 }
10019
10020 drm_atomic_helper_calc_timestamping_constants(state);
10021
10022 /* update changed items */
10023 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10024 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10025
10026 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10027 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10028
10029 drm_dbg_state(state->dev,
10030 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
10031 acrtc->crtc_id,
10032 new_crtc_state->enable,
10033 new_crtc_state->active,
10034 new_crtc_state->planes_changed,
10035 new_crtc_state->mode_changed,
10036 new_crtc_state->active_changed,
10037 new_crtc_state->connectors_changed);
10038
10039 /* Disable cursor if disabling crtc */
10040 if (old_crtc_state->active && !new_crtc_state->active) {
10041 struct dc_cursor_position position;
10042
10043 memset(&position, 0, sizeof(position));
10044 mutex_lock(&dm->dc_lock);
10045 dc_exit_ips_for_hw_access(dm->dc);
10046 dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
10047 mutex_unlock(&dm->dc_lock);
10048 }
10049
10050 /* Copy all transient state flags into dc state */
10051 if (dm_new_crtc_state->stream) {
10052 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
10053 dm_new_crtc_state->stream);
10054 }
10055
10056 /* handles headless hotplug case, updating new_state and
10057 * aconnector as needed
10058 */
10059
10060 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
10061
10062 drm_dbg_atomic(dev,
10063 "Atomic commit: SET crtc id %d: [%p]\n",
10064 acrtc->crtc_id, acrtc);
10065
10066 if (!dm_new_crtc_state->stream) {
10067 					/*
10068 					 * This could happen because of issues with the
10069 					 * delivery of userspace notifications.
10070 					 * In that case userspace tries to set a mode on
10071 					 * a display which is in fact disconnected.
10072 					 * dc_sink is NULL on the aconnector in this case.
10073 					 * We expect a mode reset to come soon.
10074 					 *
10075 					 * This can also happen when a display is unplugged
10076 					 * while the resume sequence is still in progress.
10077 					 *
10078 					 * In either case, we want to pretend we still
10079 					 * have a sink to keep the pipe running so that
10080 					 * the hw state stays consistent with the sw state.
10081 					 */
10082 drm_dbg_atomic(dev,
10083 "Failed to create new stream for crtc %d\n",
10084 acrtc->base.base.id);
10085 continue;
10086 }
10087
10088 if (dm_old_crtc_state->stream)
10089 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
10090
10091 pm_runtime_get_noresume(dev->dev);
10092
10093 acrtc->enabled = true;
10094 acrtc->hw_mode = new_crtc_state->mode;
10095 crtc->hwmode = new_crtc_state->mode;
10096 mode_set_reset_required = true;
10097 set_backlight_level = true;
10098 } else if (modereset_required(new_crtc_state)) {
10099 drm_dbg_atomic(dev,
10100 "Atomic commit: RESET. crtc id %d:[%p]\n",
10101 acrtc->crtc_id, acrtc);
10102 /* i.e. reset mode */
10103 if (dm_old_crtc_state->stream)
10104 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
10105
10106 mode_set_reset_required = true;
10107 }
10108 } /* for_each_crtc_in_state() */
10109
10110 	/* If there was a mode set or reset, disable eDP PSR and Replay */
10111 if (mode_set_reset_required) {
10112 if (dm->vblank_control_workqueue)
10113 flush_workqueue(dm->vblank_control_workqueue);
10114
10115 amdgpu_dm_replay_disable_all(dm);
10116 amdgpu_dm_psr_disable_all(dm);
10117 }
10118
10119 dm_enable_per_frame_crtc_master_sync(dc_state);
10120 mutex_lock(&dm->dc_lock);
10121 dc_exit_ips_for_hw_access(dm->dc);
10122 	WARN_ON(!dc_commit_streams(dm->dc, &params));
10123
10124 /* Allow idle optimization when vblank count is 0 for display off */
10125 if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
10126 dc_allow_idle_optimizations(dm->dc, true);
10127 mutex_unlock(&dm->dc_lock);
10128
10129 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10130 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10131
10132 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10133
10134 if (dm_new_crtc_state->stream != NULL) {
10135 const struct dc_stream_status *status =
10136 dc_stream_get_status(dm_new_crtc_state->stream);
10137
10138 if (!status)
10139 status = dc_state_get_stream_status(dc_state,
10140 dm_new_crtc_state->stream);
10141 if (!status)
10142 drm_err(dev,
10143 "got no status for stream %p on acrtc%p\n",
10144 dm_new_crtc_state->stream, acrtc);
10145 else
10146 acrtc->otg_inst = status->primary_otg_inst;
10147 }
10148 }
10149
10150 	/* During boot up and resume the DC layer will reset the panel brightness
10151 	 * to fix a flicker issue.
10152 	 * As a result dm->actual_brightness no longer matches the current panel
10153 	 * brightness level (dm->brightness holds the correct level).
10154 	 * So restore the backlight level from dm->brightness after the mode set.
10155 	 */
10156 if (set_backlight_level) {
10157 for (i = 0; i < dm->num_of_edps; i++) {
10158 if (dm->backlight_dev[i])
10159 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10160 }
10161 }
10162 }
10163
10164 static void dm_set_writeback(struct amdgpu_display_manager *dm,
10165 struct dm_crtc_state *crtc_state,
10166 struct drm_connector *connector,
10167 struct drm_connector_state *new_con_state)
10168 {
10169 struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
10170 struct amdgpu_device *adev = dm->adev;
10171 struct amdgpu_crtc *acrtc;
10172 struct dc_writeback_info *wb_info;
10173 struct pipe_ctx *pipe = NULL;
10174 struct amdgpu_framebuffer *afb;
10175 int i = 0;
10176
10177 wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
10178 if (!wb_info) {
10179 drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
10180 return;
10181 }
10182
10183 acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
10184 if (!acrtc) {
10185 drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
10186 kfree(wb_info);
10187 return;
10188 }
10189
10190 afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
10191 if (!afb) {
10192 drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
10193 kfree(wb_info);
10194 return;
10195 }
10196
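	/* Find the pipe_ctx that currently drives this CRTC's stream; its plane
	 * state is used as the writeback source plane below.
	 */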
10197 for (i = 0; i < MAX_PIPES; i++) {
10198 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
10199 pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
10200 break;
10201 }
10202 }
10203
10204 /* fill in wb_info */
10205 wb_info->wb_enabled = true;
10206
10207 wb_info->dwb_pipe_inst = 0;
10208 wb_info->dwb_params.dwbscl_black_color = 0;
10209 wb_info->dwb_params.hdr_mult = 0x1F000;
10210 wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
10211 wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
10212 wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
10213 wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
10214
10215 /* width & height from crtc */
10216 wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
10217 wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
10218 wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
10219 wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
10220
10221 wb_info->dwb_params.cnv_params.crop_en = false;
10222 wb_info->dwb_params.stereo_params.stereo_enabled = false;
10223
10224 wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
10225 wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
10226 wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
10227 wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
10228
10229 wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
10230
10231 wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
10232
10233 wb_info->dwb_params.scaler_taps.h_taps = 4;
10234 wb_info->dwb_params.scaler_taps.v_taps = 4;
10235 wb_info->dwb_params.scaler_taps.h_taps_c = 2;
10236 wb_info->dwb_params.scaler_taps.v_taps_c = 2;
10237 wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
10238
10239 wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
10240 wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
10241
10242 for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
10243 wb_info->mcif_buf_params.luma_address[i] = afb->address;
10244 wb_info->mcif_buf_params.chroma_address[i] = 0;
10245 }
10246
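	/* On DCN 3.0+ also program MCIF warmup to cover the luma buffer
	 * (pitch * dest height) starting at the framebuffer address.
	 */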
10247 wb_info->mcif_buf_params.p_vmid = 1;
10248 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
10249 wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
10250 wb_info->mcif_warmup_params.region_size =
10251 wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
10252 }
10253 wb_info->mcif_warmup_params.p_vmid = 1;
10254 wb_info->writeback_source_plane = pipe->plane_state;
10255
10256 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
10257
10258 acrtc->wb_pending = true;
10259 acrtc->wb_conn = wb_conn;
10260 drm_writeback_queue_job(wb_conn, new_con_state);
10261 }
10262
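/*
 * Walk the connector state changes and, via the HDCP workqueue, enable or
 * disable encryption as the content protection property changes. For MST
 * connectors the HDCP properties are also cached so they survive connector
 * destruction on unplug.
 */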
10263 static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
10264 {
10265 struct drm_connector_state *old_con_state, *new_con_state;
10266 struct drm_device *dev = state->dev;
10267 struct drm_connector *connector;
10268 struct amdgpu_device *adev = drm_to_adev(dev);
10269 int i;
10270
10271 if (!adev->dm.hdcp_workqueue)
10272 return;
10273
10274 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10275 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10276 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10277 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10278 struct dm_crtc_state *dm_new_crtc_state;
10279 struct amdgpu_dm_connector *aconnector;
10280
10281 if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10282 continue;
10283
10284 aconnector = to_amdgpu_dm_connector(connector);
10285
10286 drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
10287
10288 drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
10289 connector->index, connector->status, connector->dpms);
10290 drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
10291 old_con_state->content_protection, new_con_state->content_protection);
10292
10293 if (aconnector->dc_sink) {
10294 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
10295 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
10296 drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
10297 aconnector->dc_sink->edid_caps.display_name);
10298 }
10299 }
10300
10301 new_crtc_state = NULL;
10302 old_crtc_state = NULL;
10303
10304 if (acrtc) {
10305 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10306 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10307 }
10308
10309 if (old_crtc_state)
10310 drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
10311 old_crtc_state->enable,
10312 old_crtc_state->active,
10313 old_crtc_state->mode_changed,
10314 old_crtc_state->active_changed,
10315 old_crtc_state->connectors_changed);
10316
10317 if (new_crtc_state)
10318 drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
10319 new_crtc_state->enable,
10320 new_crtc_state->active,
10321 new_crtc_state->mode_changed,
10322 new_crtc_state->active_changed,
10323 new_crtc_state->connectors_changed);
10324
10325
10326 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10327
10328 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10329 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10330 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10331 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10332 dm_new_con_state->update_hdcp = true;
10333 continue;
10334 }
10335
10336 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
10337 old_con_state, connector, adev->dm.hdcp_workqueue)) {
10338 			/* When a display is unplugged from an MST hub, the connector will
10339 			 * be destroyed within dm_dp_mst_connector_destroy. The connector's
10340 			 * hdcp properties, like type, undesired, desired, enabled,
10341 			 * will be lost. So, save the hdcp properties into hdcp_work within
10342 			 * amdgpu_dm_atomic_commit_tail. If the same display is
10343 			 * plugged back with the same display index, its hdcp properties
10344 			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
10345 			 */
10346
10347 bool enable_encryption = false;
10348
10349 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
10350 enable_encryption = true;
10351
10352 if (aconnector->dc_link && aconnector->dc_sink &&
10353 aconnector->dc_link->type == dc_connection_mst_branch) {
10354 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
10355 struct hdcp_workqueue *hdcp_w =
10356 &hdcp_work[aconnector->dc_link->link_index];
10357
10358 hdcp_w->hdcp_content_type[connector->index] =
10359 new_con_state->hdcp_content_type;
10360 hdcp_w->content_protection[connector->index] =
10361 new_con_state->content_protection;
10362 }
10363
10364 if (new_crtc_state && new_crtc_state->mode_changed &&
10365 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
10366 enable_encryption = true;
10367
10368 drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
10369
10370 if (aconnector->dc_link)
10371 hdcp_update_display(
10372 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10373 new_con_state->hdcp_content_type, enable_encryption);
10374 }
10375 }
10376 }
10377
10378 static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
10379 {
10380 struct drm_crtc *crtc;
10381 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10382 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10383 int i, ret;
10384
10385 ret = drm_dp_mst_atomic_setup_commit(state);
10386 if (ret)
10387 return ret;
10388
10389 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10390 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10391 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10392 /*
10393 * Color management settings. We also update color properties
10394 * when a modeset is needed, to ensure it gets reprogrammed.
10395 */
10396 if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
10397 (dm_new_crtc_state->base.color_mgmt_changed ||
10398 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
10399 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10400 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10401 if (ret) {
10402 drm_dbg_atomic(state->dev, "Failed to update color state\n");
10403 return ret;
10404 }
10405 }
10406 }
10407
10408 return 0;
10409 }
10410
10411 /**
10412 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
10413 * @state: The atomic state to commit
10414 *
10415 * This will tell DC to commit the constructed DC state from atomic_check,
10416 * programming the hardware. Any failures here implies a hardware failure, since
10417 * atomic check should have filtered anything non-kosher.
10418 */
10419 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
10420 {
10421 struct drm_device *dev = state->dev;
10422 struct amdgpu_device *adev = drm_to_adev(dev);
10423 struct amdgpu_display_manager *dm = &adev->dm;
10424 struct dm_atomic_state *dm_state;
10425 struct dc_state *dc_state = NULL;
10426 u32 i, j;
10427 struct drm_crtc *crtc;
10428 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10429 unsigned long flags;
10430 bool wait_for_vblank = true;
10431 struct drm_connector *connector;
10432 struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
10433 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10434 int crtc_disable_count = 0;
10435
10436 trace_amdgpu_dm_atomic_commit_tail_begin(state);
10437
10438 drm_atomic_helper_update_legacy_modeset_state(dev, state);
10439 drm_dp_mst_atomic_wait_for_dependencies(state);
10440
10441 dm_state = dm_atomic_get_new_state(state);
10442 if (dm_state && dm_state->context) {
10443 dc_state = dm_state->context;
10444 amdgpu_dm_commit_streams(state, dc_state);
10445 }
10446
10447 amdgpu_dm_update_hdcp(state);
10448
10449 /* Handle connector state changes */
10450 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10451 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10452 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10453 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10454 struct dc_surface_update *dummy_updates;
10455 struct dc_stream_update stream_update;
10456 struct dc_info_packet hdr_packet;
10457 struct dc_stream_status *status = NULL;
10458 bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false;
10459
10460 memset(&stream_update, 0, sizeof(stream_update));
10461
10462 if (acrtc) {
10463 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10464 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10465 }
10466
10467 /* Skip any modesets/resets */
10468 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10469 continue;
10470
10471 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10472 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10473
10474 scaling_changed = is_scaling_state_different(dm_new_con_state,
10475 dm_old_con_state);
10476
10477 if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) &&
10478 (dm_old_crtc_state->stream->output_color_space !=
10479 get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state)))
10480 output_color_space_changed = true;
10481
10482 abm_changed = dm_new_crtc_state->abm_level !=
10483 dm_old_crtc_state->abm_level;
10484
10485 hdr_changed =
10486 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10487
10488 if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed)
10489 continue;
10490
10491 stream_update.stream = dm_new_crtc_state->stream;
10492 if (scaling_changed) {
10493 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10494 dm_new_con_state, dm_new_crtc_state->stream);
10495
10496 stream_update.src = dm_new_crtc_state->stream->src;
10497 stream_update.dst = dm_new_crtc_state->stream->dst;
10498 }
10499
10500 if (output_color_space_changed) {
10501 dm_new_crtc_state->stream->output_color_space
10502 = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state);
10503
10504 stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space;
10505 }
10506
10507 if (abm_changed) {
10508 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10509
10510 stream_update.abm_level = &dm_new_crtc_state->abm_level;
10511 }
10512
10513 if (hdr_changed) {
10514 fill_hdr_info_packet(new_con_state, &hdr_packet);
10515 stream_update.hdr_static_metadata = &hdr_packet;
10516 }
10517
10518 status = dc_stream_get_status(dm_new_crtc_state->stream);
10519
10520 if (WARN_ON(!status))
10521 continue;
10522
10523 WARN_ON(!status->plane_count);
10524
10525 /*
10526 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10527 * Here we create an empty update on each plane.
10528 * To fix this, DC should permit updating only stream properties.
10529 */
10530 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
10531 if (!dummy_updates) {
10532 drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
10533 continue;
10534 }
10535 for (j = 0; j < status->plane_count; j++)
10536 dummy_updates[j].surface = status->plane_states[0];
10537
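		/* Keep the dummy surface updates sorted by plane layer index before handing them to DC. */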
10538 sort(dummy_updates, status->plane_count,
10539 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
10540
10541 mutex_lock(&dm->dc_lock);
10542 dc_exit_ips_for_hw_access(dm->dc);
10543 dc_update_planes_and_stream(dm->dc,
10544 dummy_updates,
10545 status->plane_count,
10546 dm_new_crtc_state->stream,
10547 &stream_update);
10548 mutex_unlock(&dm->dc_lock);
10549 kfree(dummy_updates);
10550
10551 drm_connector_update_privacy_screen(new_con_state);
10552 }
10553
10554 /**
10555 	 * Enable interrupts for CRTCs that are newly enabled or have gone
10556 	 * through a modeset. This is intentionally deferred until after the
10557 	 * front end state has been modified so that the OTG is on and the IRQ
10558 	 * handlers don't access stale or invalid state.
10559 */
10560 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10561 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10562 #ifdef CONFIG_DEBUG_FS
10563 enum amdgpu_dm_pipe_crc_source cur_crc_src;
10564 #endif
10565 /* Count number of newly disabled CRTCs for dropping PM refs later. */
10566 if (old_crtc_state->active && !new_crtc_state->active)
10567 crtc_disable_count++;
10568
10569 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10570 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10571
10572 /* For freesync config update on crtc state and params for irq */
10573 update_stream_irq_parameters(dm, dm_new_crtc_state);
10574
10575 #ifdef CONFIG_DEBUG_FS
10576 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10577 cur_crc_src = acrtc->dm_irq_params.crc_src;
10578 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10579 #endif
10580
10581 if (new_crtc_state->active &&
10582 (!old_crtc_state->active ||
10583 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10584 dc_stream_retain(dm_new_crtc_state->stream);
10585 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10586 manage_dm_interrupts(adev, acrtc, dm_new_crtc_state);
10587 }
10588 /* Handle vrr on->off / off->on transitions */
10589 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
10590
10591 #ifdef CONFIG_DEBUG_FS
10592 if (new_crtc_state->active &&
10593 (!old_crtc_state->active ||
10594 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10595 /**
10596 * Frontend may have changed so reapply the CRC capture
10597 * settings for the stream.
10598 */
10599 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10601 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10602 uint8_t cnt;
10603
10604 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10605 for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
10606 if (acrtc->dm_irq_params.window_param[cnt].enable) {
10607 acrtc->dm_irq_params.window_param[cnt].update_win = true;
10608
10609 /**
10610 * It takes 2 frames for HW to stably generate CRC when
10611 							 * resuming from suspend, so we set skip_frame_cnt to 2.
10612 */
10613 acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
10614 }
10615 }
10616 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10617 }
10618 #endif
10619 if (amdgpu_dm_crtc_configure_crc_source(
10620 crtc, dm_new_crtc_state, cur_crc_src))
10621 drm_dbg_atomic(dev, "Failed to configure crc source");
10622 }
10623 }
10624 #endif
10625 }
10626
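	/* If any CRTC requested an async flip, skip waiting for vblank when
	 * programming planes and finishing the commit.
	 */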
10627 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10628 if (new_crtc_state->async_flip)
10629 wait_for_vblank = false;
10630
10631 /* update planes when needed per crtc*/
10632 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10633 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10634
10635 if (dm_new_crtc_state->stream)
10636 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
10637 }
10638
10639 /* Enable writeback */
10640 for_each_new_connector_in_state(state, connector, new_con_state, i) {
10641 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10642 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10643
10644 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
10645 continue;
10646
10647 if (!new_con_state->writeback_job)
10648 continue;
10649
10650 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10651
10652 if (!new_crtc_state)
10653 continue;
10654
10655 if (acrtc->wb_enabled)
10656 continue;
10657
10658 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10659
10660 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
10661 acrtc->wb_enabled = true;
10662 }
10663
10664 /* Update audio instances for each connector. */
10665 amdgpu_dm_commit_audio(dev, state);
10666
10667 /* restore the backlight level */
10668 for (i = 0; i < dm->num_of_edps; i++) {
10669 if (dm->backlight_dev[i] &&
10670 (dm->actual_brightness[i] != dm->brightness[i]))
10671 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10672 }
10673
10674 /*
10675 	 * Send a vblank event for any CRTC event not handled in the flip path,
10676 	 * and mark the events consumed for drm_atomic_helper_commit_hw_done().
10677 */
10678 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10679 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10680
10681 if (new_crtc_state->event)
10682 drm_send_event_locked(dev, &new_crtc_state->event->base);
10683
10684 new_crtc_state->event = NULL;
10685 }
10686 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10687
10688 /* Signal HW programming completion */
10689 drm_atomic_helper_commit_hw_done(state);
10690
10691 if (wait_for_vblank)
10692 drm_atomic_helper_wait_for_flip_done(dev, state);
10693
10694 drm_atomic_helper_cleanup_planes(dev, state);
10695
10696 /* Don't free the memory if we are hitting this as part of suspend.
10697 * This way we don't free any memory during suspend; see
10698 * amdgpu_bo_free_kernel(). The memory will be freed in the first
10699 * non-suspend modeset or when the driver is torn down.
10700 */
10701 if (!adev->in_suspend) {
10702 /* return the stolen vga memory back to VRAM */
10703 if (!adev->mman.keep_stolen_vga_memory)
10704 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10705 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10706 }
10707
10708 /*
10709 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10710 * so we can put the GPU into runtime suspend if we're not driving any
10711 * displays anymore
10712 */
10713 for (i = 0; i < crtc_disable_count; i++)
10714 pm_runtime_put_autosuspend(dev->dev);
10715 pm_runtime_mark_last_busy(dev->dev);
10716
10717 trace_amdgpu_dm_atomic_commit_tail_finish(state);
10718 }
10719
10720 static int dm_force_atomic_commit(struct drm_connector *connector)
10721 {
10722 int ret = 0;
10723 struct drm_device *ddev = connector->dev;
10724 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10725 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10726 struct drm_plane *plane = disconnected_acrtc->base.primary;
10727 struct drm_connector_state *conn_state;
10728 struct drm_crtc_state *crtc_state;
10729 struct drm_plane_state *plane_state;
10730
10731 if (!state)
10732 return -ENOMEM;
10733
10734 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10735
10736 /* Construct an atomic state to restore previous display setting */
10737
10738 /*
10739 * Attach connectors to drm_atomic_state
10740 */
10741 conn_state = drm_atomic_get_connector_state(state, connector);
10742
10743 /* Check for error in getting connector state */
10744 if (IS_ERR(conn_state)) {
10745 ret = PTR_ERR(conn_state);
10746 goto out;
10747 }
10748
10749 /* Attach crtc to drm_atomic_state*/
10750 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10751
10752 /* Check for error in getting crtc state */
10753 if (IS_ERR(crtc_state)) {
10754 ret = PTR_ERR(crtc_state);
10755 goto out;
10756 }
10757
10758 /* force a restore */
10759 crtc_state->mode_changed = true;
10760
10761 /* Attach plane to drm_atomic_state */
10762 plane_state = drm_atomic_get_plane_state(state, plane);
10763
10764 /* Check for error in getting plane state */
10765 if (IS_ERR(plane_state)) {
10766 ret = PTR_ERR(plane_state);
10767 goto out;
10768 }
10769
10770 /* Call commit internally with the state we just constructed */
10771 ret = drm_atomic_commit(state);
10772
10773 out:
10774 drm_atomic_state_put(state);
10775 if (ret)
10776 drm_err(ddev, "Restoring old state failed with %i\n", ret);
10777
10778 return ret;
10779 }
10780
10781 /*
10782  * This function handles all cases when a set mode does not come after a hotplug.
10783  * This includes when a display is unplugged then plugged back into the
10784  * same port and when running without usermode desktop manager support.
10785 */
10786 void dm_restore_drm_connector_state(struct drm_device *dev,
10787 struct drm_connector *connector)
10788 {
10789 struct amdgpu_dm_connector *aconnector;
10790 struct amdgpu_crtc *disconnected_acrtc;
10791 struct dm_crtc_state *acrtc_state;
10792
10793 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10794 return;
10795
10796 aconnector = to_amdgpu_dm_connector(connector);
10797
10798 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10799 return;
10800
10801 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10802 if (!disconnected_acrtc)
10803 return;
10804
10805 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10806 if (!acrtc_state->stream)
10807 return;
10808
10809 /*
10810 	 * If the previous sink has not been released and differs from the current
10811 	 * one, we deduce we are in a state where we cannot rely on a usermode call
10812 	 * to turn on the display, so we do it here.
10813 */
10814 if (acrtc_state->stream->sink != aconnector->dc_sink)
10815 dm_force_atomic_commit(&aconnector->base);
10816 }
10817
10818 /*
10819 * Grabs all modesetting locks to serialize against any blocking commits,
10820  * and waits for completion of all non-blocking commits.
10821 */
10822 static int do_aquire_global_lock(struct drm_device *dev,
10823 struct drm_atomic_state *state)
10824 {
10825 struct drm_crtc *crtc;
10826 struct drm_crtc_commit *commit;
10827 long ret;
10828
10829 /*
10830 	 * Adding all modeset locks to the acquire_ctx will
10831 	 * ensure that when the framework releases it, the
10832 	 * extra locks we are taking here will get released too.
10833 */
10834 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10835 if (ret)
10836 return ret;
10837
10838 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10839 spin_lock(&crtc->commit_lock);
10840 commit = list_first_entry_or_null(&crtc->commit_list,
10841 struct drm_crtc_commit, commit_entry);
10842 if (commit)
10843 drm_crtc_commit_get(commit);
10844 spin_unlock(&crtc->commit_lock);
10845
10846 if (!commit)
10847 continue;
10848
10849 /*
10850 * Make sure all pending HW programming completed and
10851 * page flips done
10852 */
10853 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10854
10855 if (ret > 0)
10856 ret = wait_for_completion_interruptible_timeout(
10857 &commit->flip_done, 10*HZ);
10858
10859 if (ret == 0)
10860 drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
10861 crtc->base.id, crtc->name);
10862
10863 drm_crtc_commit_put(commit);
10864 }
10865
10866 return ret < 0 ? ret : 0;
10867 }
10868
10869 static void get_freesync_config_for_crtc(
10870 struct dm_crtc_state *new_crtc_state,
10871 struct dm_connector_state *new_con_state)
10872 {
10873 struct mod_freesync_config config = {0};
10874 struct amdgpu_dm_connector *aconnector;
10875 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10876 int vrefresh = drm_mode_vrefresh(mode);
10877 bool fs_vid_mode = false;
10878
10879 if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10880 return;
10881
10882 aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
10883
10884 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10885 vrefresh >= aconnector->min_vfreq &&
10886 vrefresh <= aconnector->max_vfreq;
10887
10888 if (new_crtc_state->vrr_supported) {
10889 new_crtc_state->stream->ignore_msa_timing_param = true;
10890 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10891
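		/* Refresh limits are expressed in micro-Hz (vfreq in Hz * 1,000,000). */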
10892 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10893 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10894 config.vsif_supported = true;
10895 config.btr = true;
10896
10897 if (fs_vid_mode) {
10898 config.state = VRR_STATE_ACTIVE_FIXED;
10899 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10900 goto out;
10901 } else if (new_crtc_state->base.vrr_enabled) {
10902 config.state = VRR_STATE_ACTIVE_VARIABLE;
10903 } else {
10904 config.state = VRR_STATE_INACTIVE;
10905 }
10906 } else {
10907 config.state = VRR_STATE_UNSUPPORTED;
10908 }
10909 out:
10910 new_crtc_state->freesync_config = config;
10911 }
10912
10913 static void reset_freesync_config_for_crtc(
10914 struct dm_crtc_state *new_crtc_state)
10915 {
10916 new_crtc_state->vrr_supported = false;
10917
10918 memset(&new_crtc_state->vrr_infopacket, 0,
10919 sizeof(new_crtc_state->vrr_infopacket));
10920 }
10921
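/*
 * For freesync video, timing counts as "unchanged" when only the vertical
 * front porch differs: vtotal, vsync_start and vsync_end may change, but the
 * vsync pulse width and every other timing parameter must be identical.
 */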
10922 static bool
10923 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10924 struct drm_crtc_state *new_crtc_state)
10925 {
10926 const struct drm_display_mode *old_mode, *new_mode;
10927
10928 if (!old_crtc_state || !new_crtc_state)
10929 return false;
10930
10931 old_mode = &old_crtc_state->mode;
10932 new_mode = &new_crtc_state->mode;
10933
10934 if (old_mode->clock == new_mode->clock &&
10935 old_mode->hdisplay == new_mode->hdisplay &&
10936 old_mode->vdisplay == new_mode->vdisplay &&
10937 old_mode->htotal == new_mode->htotal &&
10938 old_mode->vtotal != new_mode->vtotal &&
10939 old_mode->hsync_start == new_mode->hsync_start &&
10940 old_mode->vsync_start != new_mode->vsync_start &&
10941 old_mode->hsync_end == new_mode->hsync_end &&
10942 old_mode->vsync_end != new_mode->vsync_end &&
10943 old_mode->hskew == new_mode->hskew &&
10944 old_mode->vscan == new_mode->vscan &&
10945 (old_mode->vsync_end - old_mode->vsync_start) ==
10946 (new_mode->vsync_end - new_mode->vsync_start))
10947 return true;
10948
10949 return false;
10950 }
10951
10952 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
10953 {
10954 u64 num, den, res;
10955 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10956
10957 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10958
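	/* fixed refresh in micro-Hz = pixel clock (kHz -> Hz) * 1,000,000 / (htotal * vtotal) */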
10959 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10960 den = (unsigned long long)new_crtc_state->mode.htotal *
10961 (unsigned long long)new_crtc_state->mode.vtotal;
10962
10963 res = div_u64(num, den);
10964 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10965 }
10966
10967 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10968 struct drm_atomic_state *state,
10969 struct drm_crtc *crtc,
10970 struct drm_crtc_state *old_crtc_state,
10971 struct drm_crtc_state *new_crtc_state,
10972 bool enable,
10973 bool *lock_and_validation_needed)
10974 {
10975 struct dm_atomic_state *dm_state = NULL;
10976 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10977 struct dc_stream_state *new_stream;
10978 struct amdgpu_device *adev = dm->adev;
10979 int ret = 0;
10980
10981 /*
10982 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10983 * update changed items
10984 */
10985 struct amdgpu_crtc *acrtc = NULL;
10986 struct drm_connector *connector = NULL;
10987 struct amdgpu_dm_connector *aconnector = NULL;
10988 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10989 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10990
10991 new_stream = NULL;
10992
10993 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10994 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10995 acrtc = to_amdgpu_crtc(crtc);
10996 connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10997 if (connector)
10998 aconnector = to_amdgpu_dm_connector(connector);
10999
11000 /* TODO This hack should go away */
11001 if (connector && enable) {
11002 /* Make sure fake sink is created in plug-in scenario */
11003 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
11004 connector);
11005 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
11006 connector);
11007
11008 if (WARN_ON(!drm_new_conn_state)) {
11009 ret = -EINVAL;
11010 goto fail;
11011 }
11012
11013 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
11014 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
11015
11016 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
11017 goto skip_modeset;
11018
11019 new_stream = create_validate_stream_for_sink(connector,
11020 &new_crtc_state->mode,
11021 dm_new_conn_state,
11022 dm_old_crtc_state->stream);
11023
11024 /*
11025 		 * We can have no stream on ACTION_SET if a display
11026 		 * was disconnected during S3. In this case it is not an
11027 		 * error: the OS will be updated after detection, and
11028 		 * will do the right thing on the next atomic commit.
11029 */
11030
11031 if (!new_stream) {
11032 drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
11033 __func__, acrtc->base.base.id);
11034 ret = -ENOMEM;
11035 goto fail;
11036 }
11037
11038 /*
11039 * TODO: Check VSDB bits to decide whether this should
11040 * be enabled or not.
11041 */
11042 new_stream->triggered_crtc_reset.enabled =
11043 dm->force_timing_sync;
11044
11045 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
11046
11047 ret = fill_hdr_info_packet(drm_new_conn_state,
11048 &new_stream->hdr_static_metadata);
11049 if (ret)
11050 goto fail;
11051
11052 /*
11053 * If we already removed the old stream from the context
11054 * (and set the new stream to NULL) then we can't reuse
11055 * the old stream even if the stream and scaling are unchanged.
11056 * We'll hit the BUG_ON and black screen.
11057 *
11058 * TODO: Refactor this function to allow this check to work
11059 * in all conditions.
11060 */
11061 if (amdgpu_freesync_vid_mode &&
11062 dm_new_crtc_state->stream &&
11063 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
11064 goto skip_modeset;
11065
11066 if (dm_new_crtc_state->stream &&
11067 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
11068 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
11069 new_crtc_state->mode_changed = false;
11070 drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
11071 new_crtc_state->mode_changed);
11072 }
11073 }
11074
11075 /* mode_changed flag may get updated above, need to check again */
11076 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
11077 goto skip_modeset;
11078
11079 drm_dbg_state(state->dev,
11080 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
11081 acrtc->crtc_id,
11082 new_crtc_state->enable,
11083 new_crtc_state->active,
11084 new_crtc_state->planes_changed,
11085 new_crtc_state->mode_changed,
11086 new_crtc_state->active_changed,
11087 new_crtc_state->connectors_changed);
11088
11089 /* Remove stream for any changed/disabled CRTC */
11090 if (!enable) {
11091
11092 if (!dm_old_crtc_state->stream)
11093 goto skip_modeset;
11094
11095 /* Unset freesync video if it was active before */
11096 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
11097 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
11098 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
11099 }
11100
11101 /* Now check if we should set freesync video mode */
11102 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
11103 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
11104 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
11105 is_timing_unchanged_for_freesync(new_crtc_state,
11106 old_crtc_state)) {
11107 new_crtc_state->mode_changed = false;
11108 drm_dbg_driver(adev_to_drm(adev),
11109 "Mode change not required for front porch change, setting mode_changed to %d",
11110 new_crtc_state->mode_changed);
11111
11112 set_freesync_fixed_config(dm_new_crtc_state);
11113
11114 goto skip_modeset;
11115 } else if (amdgpu_freesync_vid_mode && aconnector &&
11116 is_freesync_video_mode(&new_crtc_state->mode,
11117 aconnector)) {
11118 struct drm_display_mode *high_mode;
11119
11120 high_mode = get_highest_refresh_rate_mode(aconnector, false);
11121 if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
11122 set_freesync_fixed_config(dm_new_crtc_state);
11123 }
11124
11125 ret = dm_atomic_get_state(state, &dm_state);
11126 if (ret)
11127 goto fail;
11128
11129 drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
11130 crtc->base.id);
11131
11132 /* i.e. reset mode */
11133 if (dc_state_remove_stream(
11134 dm->dc,
11135 dm_state->context,
11136 dm_old_crtc_state->stream) != DC_OK) {
11137 ret = -EINVAL;
11138 goto fail;
11139 }
11140
11141 dc_stream_release(dm_old_crtc_state->stream);
11142 dm_new_crtc_state->stream = NULL;
11143
11144 reset_freesync_config_for_crtc(dm_new_crtc_state);
11145
11146 *lock_and_validation_needed = true;
11147
11148 } else {/* Add stream for any updated/enabled CRTC */
11149 /*
11150 		 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
11151 		 * added MST connectors are not found in the existing crtc_state in chained mode.
11152 		 * TODO: need to dig out the root cause of this.
11153 */
11154 if (!connector)
11155 goto skip_modeset;
11156
11157 if (modereset_required(new_crtc_state))
11158 goto skip_modeset;
11159
11160 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
11161 dm_old_crtc_state->stream)) {
11162
11163 WARN_ON(dm_new_crtc_state->stream);
11164
11165 ret = dm_atomic_get_state(state, &dm_state);
11166 if (ret)
11167 goto fail;
11168
11169 dm_new_crtc_state->stream = new_stream;
11170
11171 dc_stream_retain(new_stream);
11172
11173 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
11174 crtc->base.id);
11175
11176 if (dc_state_add_stream(
11177 dm->dc,
11178 dm_state->context,
11179 dm_new_crtc_state->stream) != DC_OK) {
11180 ret = -EINVAL;
11181 goto fail;
11182 }
11183
11184 *lock_and_validation_needed = true;
11185 }
11186 }
11187
11188 skip_modeset:
11189 /* Release extra reference */
11190 if (new_stream)
11191 dc_stream_release(new_stream);
11192
11193 /*
11194 * We want to do dc stream updates that do not require a
11195 * full modeset below.
11196 */
11197 if (!(enable && connector && new_crtc_state->active))
11198 return 0;
11199 /*
11200 	 * Given the above conditions, the dc state cannot be NULL because:
11201 	 * 1. We're in the process of enabling the CRTC (it has just been added
11202 	 *    to the dc context, or is already in the context),
11203 	 * 2. It has a valid connector attached, and
11204 	 * 3. It is currently active and enabled.
11205 	 * => The dc stream state currently exists.
11206 */
11207 BUG_ON(dm_new_crtc_state->stream == NULL);
11208
11209 /* Scaling or underscan settings */
11210 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
11211 drm_atomic_crtc_needs_modeset(new_crtc_state))
11212 update_stream_scaling_settings(
11213 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
11214
11215 /* ABM settings */
11216 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
11217
11218 /*
11219 * Color management settings. We also update color properties
11220 * when a modeset is needed, to ensure it gets reprogrammed.
11221 */
11222 if (dm_new_crtc_state->base.color_mgmt_changed ||
11223 dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
11224 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11225 ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
11226 if (ret)
11227 goto fail;
11228 }
11229
11230 /* Update Freesync settings. */
11231 get_freesync_config_for_crtc(dm_new_crtc_state,
11232 dm_new_conn_state);
11233
11234 return ret;
11235
11236 fail:
11237 if (new_stream)
11238 dc_stream_release(new_stream);
11239 return ret;
11240 }
11241
11242 static bool should_reset_plane(struct drm_atomic_state *state,
11243 struct drm_plane *plane,
11244 struct drm_plane_state *old_plane_state,
11245 struct drm_plane_state *new_plane_state)
11246 {
11247 struct drm_plane *other;
11248 struct drm_plane_state *old_other_state, *new_other_state;
11249 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11250 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
11251 struct amdgpu_device *adev = drm_to_adev(plane->dev);
11252 int i;
11253
11254 /*
11255 	 * TODO: Remove this hack for all ASICs once it is proven that
11256 	 * fast updates work fine on DCN3.2+.
11257 */
11258 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
11259 state->allow_modeset)
11260 return true;
11261
11262 if (amdgpu_in_reset(adev) && state->allow_modeset)
11263 return true;
11264
11265 /* Exit early if we know that we're adding or removing the plane. */
11266 if (old_plane_state->crtc != new_plane_state->crtc)
11267 return true;
11268
11269 /* old crtc == new_crtc == NULL, plane not in context. */
11270 if (!new_plane_state->crtc)
11271 return false;
11272
11273 new_crtc_state =
11274 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
11275 old_crtc_state =
11276 drm_atomic_get_old_crtc_state(state, old_plane_state->crtc);
11277
11278 if (!new_crtc_state)
11279 return true;
11280
11281 /*
11282 * A change in cursor mode means a new dc pipe needs to be acquired or
11283 * released from the state
11284 */
11285 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
11286 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
11287 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
11288 old_dm_crtc_state != NULL &&
11289 old_dm_crtc_state->cursor_mode != new_dm_crtc_state->cursor_mode) {
11290 return true;
11291 }
11292
11293 /* CRTC Degamma changes currently require us to recreate planes. */
11294 if (new_crtc_state->color_mgmt_changed)
11295 return true;
11296
11297 /*
11298 * On zpos change, planes need to be reordered by removing and re-adding
11299 * them one by one to the dc state, in order of descending zpos.
11300 *
11301 * TODO: We can likely skip bandwidth validation if the only thing that
11302 	 * changed about the plane was its z-ordering.
11303 */
11304 if (old_plane_state->normalized_zpos != new_plane_state->normalized_zpos)
11305 return true;
11306
11307 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
11308 return true;
11309
11310 /*
11311 * If there are any new primary or overlay planes being added or
11312 * removed then the z-order can potentially change. To ensure
11313 * correct z-order and pipe acquisition the current DC architecture
11314 * requires us to remove and recreate all existing planes.
11315 *
11316 * TODO: Come up with a more elegant solution for this.
11317 */
11318 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
11319 struct amdgpu_framebuffer *old_afb, *new_afb;
11320 struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
11321
11322 dm_new_other_state = to_dm_plane_state(new_other_state);
11323 dm_old_other_state = to_dm_plane_state(old_other_state);
11324
11325 if (other->type == DRM_PLANE_TYPE_CURSOR)
11326 continue;
11327
11328 if (old_other_state->crtc != new_plane_state->crtc &&
11329 new_other_state->crtc != new_plane_state->crtc)
11330 continue;
11331
11332 if (old_other_state->crtc != new_other_state->crtc)
11333 return true;
11334
11335 /* Src/dst size and scaling updates. */
11336 if (old_other_state->src_w != new_other_state->src_w ||
11337 old_other_state->src_h != new_other_state->src_h ||
11338 old_other_state->crtc_w != new_other_state->crtc_w ||
11339 old_other_state->crtc_h != new_other_state->crtc_h)
11340 return true;
11341
11342 /* Rotation / mirroring updates. */
11343 if (old_other_state->rotation != new_other_state->rotation)
11344 return true;
11345
11346 /* Blending updates. */
11347 if (old_other_state->pixel_blend_mode !=
11348 new_other_state->pixel_blend_mode)
11349 return true;
11350
11351 /* Alpha updates. */
11352 if (old_other_state->alpha != new_other_state->alpha)
11353 return true;
11354
11355 /* Colorspace changes. */
11356 if (old_other_state->color_range != new_other_state->color_range ||
11357 old_other_state->color_encoding != new_other_state->color_encoding)
11358 return true;
11359
11360 /* HDR/Transfer Function changes. */
11361 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
11362 dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
11363 dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
11364 dm_old_other_state->ctm != dm_new_other_state->ctm ||
11365 dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
11366 dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
11367 dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
11368 dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
11369 dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
11370 return true;
11371
11372 /* Framebuffer checks fall at the end. */
11373 if (!old_other_state->fb || !new_other_state->fb)
11374 continue;
11375
11376 /* Pixel format changes can require bandwidth updates. */
11377 if (old_other_state->fb->format != new_other_state->fb->format)
11378 return true;
11379
11380 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
11381 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
11382
11383 /* Tiling and DCC changes also require bandwidth updates. */
11384 if (old_afb->tiling_flags != new_afb->tiling_flags ||
11385 old_afb->base.modifier != new_afb->base.modifier)
11386 return true;
11387 }
11388
11389 return false;
11390 }
11391
11392 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
11393 struct drm_plane_state *new_plane_state,
11394 struct drm_framebuffer *fb)
11395 {
11396 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
11397 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
11398 unsigned int pitch;
11399 bool linear;
11400
11401 if (fb->width > new_acrtc->max_cursor_width ||
11402 fb->height > new_acrtc->max_cursor_height) {
11403 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
11404 new_plane_state->fb->width,
11405 new_plane_state->fb->height);
11406 return -EINVAL;
11407 }
11408 if (new_plane_state->src_w != fb->width << 16 ||
11409 new_plane_state->src_h != fb->height << 16) {
11410 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
11411 return -EINVAL;
11412 }
11413
11414 /* Pitch in pixels */
11415 pitch = fb->pitches[0] / fb->format->cpp[0];
11416
11417 if (fb->width != pitch) {
11418 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
11419 fb->width, pitch);
11420 return -EINVAL;
11421 }
11422
11423 switch (pitch) {
11424 case 64:
11425 case 128:
11426 case 256:
11427 /* FB pitch is supported by cursor plane */
11428 break;
11429 default:
11430 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
11431 return -EINVAL;
11432 }
11433
11434 /* Core DRM takes care of checking FB modifiers, so we only need to
11435 * check tiling flags when the FB doesn't have a modifier.
11436 */
11437 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
11438 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
11439 linear = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE) == 0;
11440 } else if (adev->family >= AMDGPU_FAMILY_AI) {
11441 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
11442 } else {
11443 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
11444 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
11445 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
11446 }
11447 if (!linear) {
11448 DRM_DEBUG_ATOMIC("Cursor FB not linear");
11449 return -EINVAL;
11450 }
11451 }
11452
11453 return 0;
11454 }
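
/*
 * Worked example for the pitch check above (illustrative only, not driver
 * code): a 64x64 cursor FB using DRM_FORMAT_ARGB8888 has pitches[0] = 256
 * bytes and format->cpp[0] = 4, so:
 *
 *	pitch = fb->pitches[0] / fb->format->cpp[0];	// 256 / 4 = 64 pixels
 *
 * pitch then matches fb->width and hits "case 64", so the FB is accepted. A
 * buffer allocated 96 pixels wide would compute pitch = 96 and fall through
 * to the default branch, returning -EINVAL.
 */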
11455
11456 /*
11457 * Helper function for checking the cursor in native mode
11458 */
11459 static int dm_check_native_cursor_state(struct drm_crtc *new_plane_crtc,
11460 struct drm_plane *plane,
11461 struct drm_plane_state *new_plane_state,
11462 bool enable)
11463 {
11464
11465 struct amdgpu_crtc *new_acrtc;
11466 int ret;
11467
11468 if (!enable || !new_plane_crtc ||
11469 drm_atomic_plane_disabling(plane->state, new_plane_state))
11470 return 0;
11471
11472 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
11473
11474 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
11475 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
11476 return -EINVAL;
11477 }
11478
11479 if (new_plane_state->fb) {
11480 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
11481 new_plane_state->fb);
11482 if (ret)
11483 return ret;
11484 }
11485
11486 return 0;
11487 }
11488
11489 static bool dm_should_update_native_cursor(struct drm_atomic_state *state,
11490 struct drm_crtc *old_plane_crtc,
11491 struct drm_crtc *new_plane_crtc,
11492 bool enable)
11493 {
11494 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11495 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11496
11497 if (!enable) {
11498 if (old_plane_crtc == NULL)
11499 return true;
11500
11501 old_crtc_state = drm_atomic_get_old_crtc_state(
11502 state, old_plane_crtc);
11503 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11504
11505 return dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
11506 } else {
11507 if (new_plane_crtc == NULL)
11508 return true;
11509
11510 new_crtc_state = drm_atomic_get_new_crtc_state(
11511 state, new_plane_crtc);
11512 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11513
11514 return dm_new_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE;
11515 }
11516 }
11517
11518 static int dm_update_plane_state(struct dc *dc,
11519 struct drm_atomic_state *state,
11520 struct drm_plane *plane,
11521 struct drm_plane_state *old_plane_state,
11522 struct drm_plane_state *new_plane_state,
11523 bool enable,
11524 bool *lock_and_validation_needed,
11525 bool *is_top_most_overlay)
11526 {
11527
11528 struct dm_atomic_state *dm_state = NULL;
11529 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
11530 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11531 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
11532 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
11533 bool needs_reset, update_native_cursor;
11534 int ret = 0;
11535
11536
11537 new_plane_crtc = new_plane_state->crtc;
11538 old_plane_crtc = old_plane_state->crtc;
11539 dm_new_plane_state = to_dm_plane_state(new_plane_state);
11540 dm_old_plane_state = to_dm_plane_state(old_plane_state);
11541
11542 update_native_cursor = dm_should_update_native_cursor(state,
11543 old_plane_crtc,
11544 new_plane_crtc,
11545 enable);
11546
11547 if (plane->type == DRM_PLANE_TYPE_CURSOR && update_native_cursor) {
11548 ret = dm_check_native_cursor_state(new_plane_crtc, plane,
11549 new_plane_state, enable);
11550 if (ret)
11551 return ret;
11552
11553 return 0;
11554 }
11555
11556 needs_reset = should_reset_plane(state, plane, old_plane_state,
11557 new_plane_state);
11558
11559 /* Remove any changed/removed planes */
11560 if (!enable) {
11561 if (!needs_reset)
11562 return 0;
11563
11564 if (!old_plane_crtc)
11565 return 0;
11566
11567 old_crtc_state = drm_atomic_get_old_crtc_state(
11568 state, old_plane_crtc);
11569 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11570
11571 if (!dm_old_crtc_state->stream)
11572 return 0;
11573
11574 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
11575 plane->base.id, old_plane_crtc->base.id);
11576
11577 ret = dm_atomic_get_state(state, &dm_state);
11578 if (ret)
11579 return ret;
11580
11581 if (!dc_state_remove_plane(
11582 dc,
11583 dm_old_crtc_state->stream,
11584 dm_old_plane_state->dc_state,
11585 dm_state->context)) {
11586
11587 return -EINVAL;
11588 }
11589
11590 if (dm_old_plane_state->dc_state)
11591 dc_plane_state_release(dm_old_plane_state->dc_state);
11592
11593 dm_new_plane_state->dc_state = NULL;
11594
11595 *lock_and_validation_needed = true;
11596
11597 } else { /* Add new planes */
11598 struct dc_plane_state *dc_new_plane_state;
11599
11600 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11601 return 0;
11602
11603 if (!new_plane_crtc)
11604 return 0;
11605
11606 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11607 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11608
11609 if (!dm_new_crtc_state->stream)
11610 return 0;
11611
11612 if (!needs_reset)
11613 return 0;
11614
11615 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11616 if (ret)
11617 goto out;
11618
11619 WARN_ON(dm_new_plane_state->dc_state);
11620
11621 dc_new_plane_state = dc_create_plane_state(dc);
11622 if (!dc_new_plane_state) {
11623 ret = -ENOMEM;
11624 goto out;
11625 }
11626
11627 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11628 plane->base.id, new_plane_crtc->base.id);
11629
11630 ret = fill_dc_plane_attributes(
11631 drm_to_adev(new_plane_crtc->dev),
11632 dc_new_plane_state,
11633 new_plane_state,
11634 new_crtc_state);
11635 if (ret) {
11636 dc_plane_state_release(dc_new_plane_state);
11637 goto out;
11638 }
11639
11640 ret = dm_atomic_get_state(state, &dm_state);
11641 if (ret) {
11642 dc_plane_state_release(dc_new_plane_state);
11643 goto out;
11644 }
11645
11646 /*
11647 * Any atomic check errors that occur after this will
11648 * not need a release. The plane state will be attached
11649 * to the stream, and therefore part of the atomic
11650 * state. It'll be released when the atomic state is
11651 * cleaned.
11652 */
11653 if (!dc_state_add_plane(
11654 dc,
11655 dm_new_crtc_state->stream,
11656 dc_new_plane_state,
11657 dm_state->context)) {
11658
11659 dc_plane_state_release(dc_new_plane_state);
11660 ret = -EINVAL;
11661 goto out;
11662 }
11663
11664 dm_new_plane_state->dc_state = dc_new_plane_state;
11665
11666 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11667
11668 /* Tell DC to do a full surface update every time there
11669 * is a plane change. Inefficient, but works for now.
11670 */
11671 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11672
11673 *lock_and_validation_needed = true;
11674 }
11675
11676 out:
11677 /* If enabling cursor overlay failed, attempt fallback to native mode */
11678 if (enable && ret == -EINVAL && plane->type == DRM_PLANE_TYPE_CURSOR) {
11679 ret = dm_check_native_cursor_state(new_plane_crtc, plane,
11680 new_plane_state, enable);
11681 if (ret)
11682 return ret;
11683
11684 dm_new_crtc_state->cursor_mode = DM_CURSOR_NATIVE_MODE;
11685 }
11686
11687 return ret;
11688 }
11689
11690 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11691 int *src_w, int *src_h)
11692 {
11693 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11694 case DRM_MODE_ROTATE_90:
11695 case DRM_MODE_ROTATE_270:
11696 *src_w = plane_state->src_h >> 16;
11697 *src_h = plane_state->src_w >> 16;
11698 break;
11699 case DRM_MODE_ROTATE_0:
11700 case DRM_MODE_ROTATE_180:
11701 default:
11702 *src_w = plane_state->src_w >> 16;
11703 *src_h = plane_state->src_h >> 16;
11704 break;
11705 }
11706 }
11707
11708 static void
11709 dm_get_plane_scale(struct drm_plane_state *plane_state,
11710 int *out_plane_scale_w, int *out_plane_scale_h)
11711 {
11712 int plane_src_w, plane_src_h;
11713
11714 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
11715 *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
11716 *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
11717 }
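
/*
 * Worked example (illustrative only): src_w/src_h are 16.16 fixed point, so a
 * 1920x1080 source is src_w = 1920 << 16. Displaying that source in a 960x540
 * CRTC rectangle yields a scale of 960 * 1000 / 1920 = 500 in both dimensions,
 * i.e. 0.5x expressed in thousandths; an unscaled plane always yields 1000.
 *
 *	int scale_w, scale_h;
 *
 *	dm_get_plane_scale(plane_state, &scale_w, &scale_h);
 *	// scale_w == scale_h == 500 for the 1920x1080 -> 960x540 case above
 */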
11718
11719 /*
11720 * The normalized_zpos value cannot be used by this iterator directly. It's only
11721 * calculated for enabled planes, potentially causing normalized_zpos collisions
11722 * between enabled/disabled planes in the atomic state. We need a unique value
11723 * so that the iterator will not generate the same object twice, or loop
11724 * indefinitely.
11725 */
11726 static inline struct __drm_planes_state *__get_next_zpos(
11727 struct drm_atomic_state *state,
11728 struct __drm_planes_state *prev)
11729 {
11730 unsigned int highest_zpos = 0, prev_zpos = 256;
11731 uint32_t highest_id = 0, prev_id = UINT_MAX;
11732 struct drm_plane_state *new_plane_state;
11733 struct drm_plane *plane;
11734 int i, highest_i = -1;
11735
11736 if (prev != NULL) {
11737 prev_zpos = prev->new_state->zpos;
11738 prev_id = prev->ptr->base.id;
11739 }
11740
11741 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
11742 /* Skip planes with higher zpos than the previously returned */
11743 if (new_plane_state->zpos > prev_zpos ||
11744 (new_plane_state->zpos == prev_zpos &&
11745 plane->base.id >= prev_id))
11746 continue;
11747
11748 /* Save the index of the plane with highest zpos */
11749 if (new_plane_state->zpos > highest_zpos ||
11750 (new_plane_state->zpos == highest_zpos &&
11751 plane->base.id > highest_id)) {
11752 highest_zpos = new_plane_state->zpos;
11753 highest_id = plane->base.id;
11754 highest_i = i;
11755 }
11756 }
11757
11758 if (highest_i < 0)
11759 return NULL;
11760
11761 return &state->planes[highest_i];
11762 }
11763
11764 /*
11765 * Use the uniqueness of the plane's (zpos, drm obj ID) combination to iterate
11766 * by descending zpos, as read from the new plane state. This is the same
11767 * ordering as defined by drm_atomic_normalize_zpos().
11768 */
11769 #define for_each_oldnew_plane_in_descending_zpos(__state, plane, old_plane_state, new_plane_state) \
11770 for (struct __drm_planes_state *__i = __get_next_zpos((__state), NULL); \
11771 __i != NULL; __i = __get_next_zpos((__state), __i)) \
11772 for_each_if(((plane) = __i->ptr, \
11773 (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
11774 (old_plane_state) = __i->old_state, \
11775 (new_plane_state) = __i->new_state, 1))
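
/*
 * Illustrative iteration order (not driver code): with three planes in the
 * atomic state, A (zpos 2, obj id 31), B (zpos 1, obj id 35) and C (zpos 1,
 * obj id 33), the macro visits A first (highest zpos), then B, then C (equal
 * zpos resolved by descending object id), matching the ordering produced by
 * drm_atomic_normalize_zpos():
 *
 *	for_each_oldnew_plane_in_descending_zpos(state, plane, old_state, new_state)
 *		DRM_DEBUG_ATOMIC("plane %u zpos %u\n", plane->base.id, new_state->zpos);
 */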
11776
11777 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11778 {
11779 struct drm_connector *connector;
11780 struct drm_connector_state *conn_state, *old_conn_state;
11781 struct amdgpu_dm_connector *aconnector = NULL;
11782 int i;
11783
11784 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11785 if (!conn_state->crtc)
11786 conn_state = old_conn_state;
11787
11788 if (conn_state->crtc != crtc)
11789 continue;
11790
11791 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
11792 continue;
11793
11794 aconnector = to_amdgpu_dm_connector(connector);
11795 if (!aconnector->mst_output_port || !aconnector->mst_root)
11796 aconnector = NULL;
11797 else
11798 break;
11799 }
11800
11801 if (!aconnector)
11802 return 0;
11803
11804 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
11805 }
11806
11807 /**
11808 * DOC: Cursor Modes - Native vs Overlay
11809 *
11810 * In native mode, the cursor uses an integrated cursor pipe within each DCN hw
11811 * plane. It does not require a dedicated hw plane to enable, but it is
11812 * subject to the same z-order and scaling as the hw plane. It also has format
11813 * restrictions: an RGB cursor in native mode cannot be enabled within a non-RGB
11814 * hw plane.
11815 *
11816 * In overlay mode, the cursor uses a separate DCN hw plane, and thus has its
11817 * own scaling and z-pos. It also has no blending restrictions. It lends itself
11818 * to cursor behavior more akin to a DRM client's expectations. However, it does
11819 * occupy an extra DCN plane, and therefore will only be used if a DCN plane is
11820 * available.
11821 */
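
/*
 * A minimal sketch of the resulting policy (illustrative only; see
 * dm_crtc_get_cursor_mode() below for the actual decision): native mode is
 * kept while every plane underneath the cursor is RGB, scaled the same as the
 * cursor, and the planes cover the entire CRTC; otherwise overlay mode is
 * requested. The variables here are placeholders, not driver state:
 *
 *	if (underlying_is_yuv || underlying_scale != cursor_scale ||
 *	    !planes_cover_entire_crtc)
 *		cursor_mode = DM_CURSOR_OVERLAY_MODE;
 *	else
 *		cursor_mode = DM_CURSOR_NATIVE_MODE;
 */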
11822
11823 /**
11824 * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
11825 * @adev: amdgpu device
11826 * @state: DRM atomic state
11827 * @dm_crtc_state: amdgpu state for the CRTC containing the cursor
11828 * @cursor_mode: Returns the required cursor mode on dm_crtc_state
11829 *
11830 * Get whether the cursor should be enabled in native mode, or overlay mode, on
11831 * the dm_crtc_state.
11832 *
11833 * The cursor should be enabled in overlay mode if there exists an underlying
11834 * plane - on which the cursor may be blended - that is either YUV formatted, or
11835 * scaled differently from the cursor.
11836 *
11837 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
11838 * calling this function.
11839 *
11840 * Return: 0 on success, or an error code if getting the cursor plane state
11841 * failed.
11842 */
11843 static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
11844 struct drm_atomic_state *state,
11845 struct dm_crtc_state *dm_crtc_state,
11846 enum amdgpu_dm_cursor_mode *cursor_mode)
11847 {
11848 struct drm_plane_state *old_plane_state, *plane_state, *cursor_state;
11849 struct drm_crtc_state *crtc_state = &dm_crtc_state->base;
11850 struct drm_plane *plane;
11851 bool consider_mode_change = false;
11852 bool entire_crtc_covered = false;
11853 bool cursor_changed = false;
11854 int underlying_scale_w, underlying_scale_h;
11855 int cursor_scale_w, cursor_scale_h;
11856 int i;
11857
11858 /* Overlay cursor is not supported on HW before DCN.
11859 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions
11860 * of previous DCN generations, so enable native mode on DCN401 in addition to DCE.
11861 */
11862 if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
11863 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
11864 *cursor_mode = DM_CURSOR_NATIVE_MODE;
11865 return 0;
11866 }
11867
11868 /* Init cursor_mode to be the same as current */
11869 *cursor_mode = dm_crtc_state->cursor_mode;
11870
11871 /*
11872 * Cursor mode can change if a plane's format changes, scale changes, is
11873 * enabled/disabled, or z-order changes.
11874 */
11875 for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
11876 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
11877
11878 /* Only care about planes on this CRTC */
11879 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0)
11880 continue;
11881
11882 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11883 cursor_changed = true;
11884
11885 if (drm_atomic_plane_enabling(old_plane_state, plane_state) ||
11886 drm_atomic_plane_disabling(old_plane_state, plane_state) ||
11887 old_plane_state->fb->format != plane_state->fb->format) {
11888 consider_mode_change = true;
11889 break;
11890 }
11891
11892 dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
11893 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
11894 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
11895 consider_mode_change = true;
11896 break;
11897 }
11898 }
11899
11900 if (!consider_mode_change && !crtc_state->zpos_changed)
11901 return 0;
11902
11903 /*
11904 * If no cursor change on this CRTC, and not enabled on this CRTC, then
11905 * no need to set cursor mode. This avoids needlessly locking the cursor
11906 * state.
11907 */
11908 if (!cursor_changed &&
11909 !(drm_plane_mask(crtc_state->crtc->cursor) & crtc_state->plane_mask)) {
11910 return 0;
11911 }
11912
11913 cursor_state = drm_atomic_get_plane_state(state,
11914 crtc_state->crtc->cursor);
11915 if (IS_ERR(cursor_state))
11916 return PTR_ERR(cursor_state);
11917
11918 /* Cursor is disabled */
11919 if (!cursor_state->fb)
11920 return 0;
11921
11922 /* For all planes in descending z-order (all of which are below cursor
11923 * as per zpos definitions), check their scaling and format
11924 */
11925 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, plane_state) {
11926
11927 /* Only care about non-cursor planes on this CRTC */
11928 if ((drm_plane_mask(plane) & crtc_state->plane_mask) == 0 ||
11929 plane->type == DRM_PLANE_TYPE_CURSOR)
11930 continue;
11931
11932 /* Underlying plane is YUV format - use overlay cursor */
11933 if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
11934 *cursor_mode = DM_CURSOR_OVERLAY_MODE;
11935 return 0;
11936 }
11937
11938 dm_get_plane_scale(plane_state,
11939 &underlying_scale_w, &underlying_scale_h);
11940 dm_get_plane_scale(cursor_state,
11941 &cursor_scale_w, &cursor_scale_h);
11942
11943 /* Underlying plane has different scale - use overlay cursor */
11944 if (cursor_scale_w != underlying_scale_w &&
11945 cursor_scale_h != underlying_scale_h) {
11946 *cursor_mode = DM_CURSOR_OVERLAY_MODE;
11947 return 0;
11948 }
11949
11950 /* If this plane covers the whole CRTC, no need to check planes underneath */
11951 if (plane_state->crtc_x <= 0 && plane_state->crtc_y <= 0 &&
11952 plane_state->crtc_x + plane_state->crtc_w >= crtc_state->mode.hdisplay &&
11953 plane_state->crtc_y + plane_state->crtc_h >= crtc_state->mode.vdisplay) {
11954 entire_crtc_covered = true;
11955 break;
11956 }
11957 }
11958
11959 /* If planes do not cover the entire CRTC, use overlay mode to enable
11960 * cursor over holes
11961 */
11962 if (entire_crtc_covered)
11963 *cursor_mode = DM_CURSOR_NATIVE_MODE;
11964 else
11965 *cursor_mode = DM_CURSOR_OVERLAY_MODE;
11966
11967 return 0;
11968 }
11969
11970 static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
11971 struct drm_atomic_state *state,
11972 struct drm_crtc_state *crtc_state)
11973 {
11974 struct drm_plane *plane;
11975 struct drm_plane_state *new_plane_state, *old_plane_state;
11976
11977 drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
11978 new_plane_state = drm_atomic_get_plane_state(state, plane);
11979 old_plane_state = drm_atomic_get_plane_state(state, plane);
11980
11981 if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
11982 drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
11983 return false;
11984 }
11985
11986 if (old_plane_state->fb && new_plane_state->fb &&
11987 get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
11988 return true;
11989 }
11990
11991 return false;
11992 }
11993
11994 /**
11995 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11996 *
11997 * @dev: The DRM device
11998 * @state: The atomic state to commit
11999 *
12000 * Validate that the given atomic state is programmable by DC into hardware.
12001 * This involves constructing a &struct dc_state reflecting the new hardware
12002 * state we wish to commit, then querying DC to see if it is programmable. It's
12003 * important not to modify the existing DC state. Otherwise, atomic_check
12004 * may unexpectedly commit hardware changes.
12005 *
12006 * When validating the DC state, it's important that the right locks are
12007 * acquired. For full updates case which removes/adds/updates streams on one
12008 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
12009 * that any such full update commit will wait for completion of any outstanding
12010 * flip using DRMs synchronization events.
12011 *
12012 * Note that DM adds the affected connectors for all CRTCs in state, when that
12013 * might not seem necessary. This is because DC stream creation requires the
12014 * DC sink, which is tied to the DRM connector state. Cleaning this up should
12015 * be possible but non-trivial - a possible TODO item.
12016 *
12017 * Return: -Error code if validation failed.
12018 */
12019 static int amdgpu_dm_atomic_check(struct drm_device *dev,
12020 struct drm_atomic_state *state)
12021 {
12022 struct amdgpu_device *adev = drm_to_adev(dev);
12023 struct dm_atomic_state *dm_state = NULL;
12024 struct dc *dc = adev->dm.dc;
12025 struct drm_connector *connector;
12026 struct drm_connector_state *old_con_state, *new_con_state;
12027 struct drm_crtc *crtc;
12028 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12029 struct drm_plane *plane;
12030 struct drm_plane_state *old_plane_state, *new_plane_state, *new_cursor_state;
12031 enum dc_status status;
12032 int ret, i;
12033 bool lock_and_validation_needed = false;
12034 bool is_top_most_overlay = true;
12035 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
12036 struct drm_dp_mst_topology_mgr *mgr;
12037 struct drm_dp_mst_topology_state *mst_state;
12038 struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
12039
12040 trace_amdgpu_dm_atomic_check_begin(state);
12041
12042 ret = drm_atomic_helper_check_modeset(dev, state);
12043 if (ret) {
12044 drm_dbg_atomic(dev, "drm_atomic_helper_check_modeset() failed\n");
12045 goto fail;
12046 }
12047
12048 /* Check connector changes */
12049 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
12050 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
12051 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
12052
12053 /* Skip connectors that are disabled or part of modeset already. */
12054 if (!new_con_state->crtc)
12055 continue;
12056
12057 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
12058 if (IS_ERR(new_crtc_state)) {
12059 drm_dbg_atomic(dev, "drm_atomic_get_crtc_state() failed\n");
12060 ret = PTR_ERR(new_crtc_state);
12061 goto fail;
12062 }
12063
12064 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
12065 dm_old_con_state->scaling != dm_new_con_state->scaling)
12066 new_crtc_state->connectors_changed = true;
12067 }
12068
12069 if (dc_resource_is_dsc_encoding_supported(dc)) {
12070 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12071 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
12072 ret = add_affected_mst_dsc_crtcs(state, crtc);
12073 if (ret) {
12074 drm_dbg_atomic(dev, "add_affected_mst_dsc_crtcs() failed\n");
12075 goto fail;
12076 }
12077 }
12078 }
12079 }
12080 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12081 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
12082
12083 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
12084 !new_crtc_state->color_mgmt_changed &&
12085 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
12086 dm_old_crtc_state->dsc_force_changed == false)
12087 continue;
12088
12089 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
12090 if (ret) {
12091 drm_dbg_atomic(dev, "amdgpu_dm_verify_lut_sizes() failed\n");
12092 goto fail;
12093 }
12094
12095 if (!new_crtc_state->enable)
12096 continue;
12097
12098 ret = drm_atomic_add_affected_connectors(state, crtc);
12099 if (ret) {
12100 drm_dbg_atomic(dev, "drm_atomic_add_affected_connectors() failed\n");
12101 goto fail;
12102 }
12103
12104 ret = drm_atomic_add_affected_planes(state, crtc);
12105 if (ret) {
12106 drm_dbg_atomic(dev, "drm_atomic_add_affected_planes() failed\n");
12107 goto fail;
12108 }
12109
12110 if (dm_old_crtc_state->dsc_force_changed)
12111 new_crtc_state->mode_changed = true;
12112 }
12113
12114 /*
12115 * Add all primary and overlay planes on the CRTC to the state
12116 * whenever a plane is enabled to maintain correct z-ordering
12117 * and to enable fast surface updates.
12118 */
12119 drm_for_each_crtc(crtc, dev) {
12120 bool modified = false;
12121
12122 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
12123 if (plane->type == DRM_PLANE_TYPE_CURSOR)
12124 continue;
12125
12126 if (new_plane_state->crtc == crtc ||
12127 old_plane_state->crtc == crtc) {
12128 modified = true;
12129 break;
12130 }
12131 }
12132
12133 if (!modified)
12134 continue;
12135
12136 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
12137 if (plane->type == DRM_PLANE_TYPE_CURSOR)
12138 continue;
12139
12140 new_plane_state =
12141 drm_atomic_get_plane_state(state, plane);
12142
12143 if (IS_ERR(new_plane_state)) {
12144 ret = PTR_ERR(new_plane_state);
12145 drm_dbg_atomic(dev, "new_plane_state is BAD\n");
12146 goto fail;
12147 }
12148 }
12149 }
12150
12151 /*
12152 * DC consults the zpos (layer_index in DC terminology) to determine the
12153 * hw plane on which to enable the hw cursor (see
12154 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
12155 * atomic state, so call drm helper to normalize zpos.
12156 */
12157 ret = drm_atomic_normalize_zpos(dev, state);
12158 if (ret) {
12159 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
12160 goto fail;
12161 }
12162
12163 /*
12164 * Determine whether cursors on each CRTC should be enabled in native or
12165 * overlay mode.
12166 */
12167 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12168 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
12169
12170 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
12171 &dm_new_crtc_state->cursor_mode);
12172 if (ret) {
12173 drm_dbg(dev, "Failed to determine cursor mode\n");
12174 goto fail;
12175 }
12176
12177 /*
12178 * If overlay cursor is needed, DC cannot go through the
12179 * native cursor update path. All enabled planes on the CRTC
12180 * need to be added for DC to not disable a plane by mistake
12181 */
12182 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
12183 ret = drm_atomic_add_affected_planes(state, crtc);
12184 if (ret)
12185 goto fail;
12186 }
12187 }
12188
12189 /* Remove existing planes if they are modified */
12190 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
12191
12192 ret = dm_update_plane_state(dc, state, plane,
12193 old_plane_state,
12194 new_plane_state,
12195 false,
12196 &lock_and_validation_needed,
12197 &is_top_most_overlay);
12198 if (ret) {
12199 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
12200 goto fail;
12201 }
12202 }
12203
12204 /* Disable all crtcs which require disable */
12205 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12206 ret = dm_update_crtc_state(&adev->dm, state, crtc,
12207 old_crtc_state,
12208 new_crtc_state,
12209 false,
12210 &lock_and_validation_needed);
12211 if (ret) {
12212 drm_dbg_atomic(dev, "DISABLE: dm_update_crtc_state() failed\n");
12213 goto fail;
12214 }
12215 }
12216
12217 /* Enable all crtcs which require enable */
12218 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12219 ret = dm_update_crtc_state(&adev->dm, state, crtc,
12220 old_crtc_state,
12221 new_crtc_state,
12222 true,
12223 &lock_and_validation_needed);
12224 if (ret) {
12225 drm_dbg_atomic(dev, "ENABLE: dm_update_crtc_state() failed\n");
12226 goto fail;
12227 }
12228 }
12229
12230 /* Add new/modified planes */
12231 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
12232 ret = dm_update_plane_state(dc, state, plane,
12233 old_plane_state,
12234 new_plane_state,
12235 true,
12236 &lock_and_validation_needed,
12237 &is_top_most_overlay);
12238 if (ret) {
12239 drm_dbg_atomic(dev, "dm_update_plane_state() failed\n");
12240 goto fail;
12241 }
12242 }
12243
12244 #if defined(CONFIG_DRM_AMD_DC_FP)
12245 if (dc_resource_is_dsc_encoding_supported(dc)) {
12246 ret = pre_validate_dsc(state, &dm_state, vars);
12247 if (ret != 0)
12248 goto fail;
12249 }
12250 #endif
12251
12252 /* Run this here since we want to validate the streams we created */
12253 ret = drm_atomic_helper_check_planes(dev, state);
12254 if (ret) {
12255 drm_dbg_atomic(dev, "drm_atomic_helper_check_planes() failed\n");
12256 goto fail;
12257 }
12258
12259 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12260 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
12261 if (dm_new_crtc_state->mpo_requested)
12262 drm_dbg_atomic(dev, "MPO enablement requested on crtc:[%p]\n", crtc);
12263 }
12264
12265 /* Check cursor restrictions */
12266 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12267 enum amdgpu_dm_cursor_mode required_cursor_mode;
12268 int is_rotated, is_scaled;
12269
12270 /* Overlay cursor is not subject to native cursor restrictions */
12271 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
12272 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
12273 continue;
12274
12275 /* Check if rotation or scaling is enabled on DCN401 */
12276 if ((drm_plane_mask(crtc->cursor) & new_crtc_state->plane_mask) &&
12277 amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
12278 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
12279
12280 is_rotated = new_cursor_state &&
12281 ((new_cursor_state->rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0);
12282 is_scaled = new_cursor_state && ((new_cursor_state->src_w >> 16 != new_cursor_state->crtc_w) ||
12283 (new_cursor_state->src_h >> 16 != new_cursor_state->crtc_h));
12284
12285 if (is_rotated || is_scaled) {
12286 drm_dbg_driver(
12287 crtc->dev,
12288 "[CRTC:%d:%s] cannot enable hardware cursor due to rotation/scaling\n",
12289 crtc->base.id, crtc->name);
12290 ret = -EINVAL;
12291 goto fail;
12292 }
12293 }
12294
12295 /* If HW can only do native cursor, check restrictions again */
12296 ret = dm_crtc_get_cursor_mode(adev, state, dm_new_crtc_state,
12297 &required_cursor_mode);
12298 if (ret) {
12299 drm_dbg_driver(crtc->dev,
12300 "[CRTC:%d:%s] Checking cursor mode failed\n",
12301 crtc->base.id, crtc->name);
12302 goto fail;
12303 } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
12304 drm_dbg_driver(crtc->dev,
12305 "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
12306 crtc->base.id, crtc->name);
12307 ret = -EINVAL;
12308 goto fail;
12309 }
12310 }
12311
12312 if (state->legacy_cursor_update) {
12313 /*
12314 * This is a fast cursor update coming from the plane update
12315 * helper, check if it can be done asynchronously for better
12316 * performance.
12317 */
12318 state->async_update =
12319 !drm_atomic_helper_async_check(dev, state);
12320
12321 /*
12322 * Skip the remaining global validation if this is an async
12323 * update. Cursor updates can be done without affecting
12324 * state or bandwidth calcs and this avoids the performance
12325 * penalty of locking the private state object and
12326 * allocating a new dc_state.
12327 */
12328 if (state->async_update)
12329 return 0;
12330 }
12331
12332 /* Check scaling and underscan changes */
12333 /* TODO Removed scaling changes validation due to inability to commit
12334 * new stream into context w/o causing full reset. Need to
12335 * decide how to handle.
12336 */
12337 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
12338 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
12339 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
12340 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
12341
12342 /* Skip any modesets/resets */
12343 if (!acrtc || drm_atomic_crtc_needs_modeset(
12344 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
12345 continue;
12346
12347 /* Skip anything that is not a scaling or underscan change */
12348 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
12349 continue;
12350
12351 lock_and_validation_needed = true;
12352 }
12353
12354 /* set the slot info for each mst_state based on the link encoding format */
12355 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
12356 struct amdgpu_dm_connector *aconnector;
12357 struct drm_connector *connector;
12358 struct drm_connector_list_iter iter;
12359 u8 link_coding_cap;
12360
12361 drm_connector_list_iter_begin(dev, &iter);
12362 drm_for_each_connector_iter(connector, &iter) {
12363 if (connector->index == mst_state->mgr->conn_base_id) {
12364 aconnector = to_amdgpu_dm_connector(connector);
12365 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
12366 drm_dp_mst_update_slots(mst_state, link_coding_cap);
12367
12368 break;
12369 }
12370 }
12371 drm_connector_list_iter_end(&iter);
12372 }
12373
12374 /**
12375 * Streams and planes are reset when there are changes that affect
12376 * bandwidth. Anything that affects bandwidth needs to go through
12377 * DC global validation to ensure that the configuration can be applied
12378 * to hardware.
12379 *
12380 * We have to currently stall out here in atomic_check for outstanding
12381 * commits to finish in this case because our IRQ handlers reference
12382 * DRM state directly - we can end up disabling interrupts too early
12383 * if we don't.
12384 *
12385 * TODO: Remove this stall and drop DM state private objects.
12386 */
12387 if (lock_and_validation_needed) {
12388 ret = dm_atomic_get_state(state, &dm_state);
12389 if (ret) {
12390 drm_dbg_atomic(dev, "dm_atomic_get_state() failed\n");
12391 goto fail;
12392 }
12393
12394 ret = do_aquire_global_lock(dev, state);
12395 if (ret) {
12396 drm_dbg_atomic(dev, "do_aquire_global_lock() failed\n");
12397 goto fail;
12398 }
12399
12400 #if defined(CONFIG_DRM_AMD_DC_FP)
12401 if (dc_resource_is_dsc_encoding_supported(dc)) {
12402 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
12403 if (ret) {
12404 drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
12405 ret = -EINVAL;
12406 goto fail;
12407 }
12408 }
12409 #endif
12410
12411 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
12412 if (ret) {
12413 drm_dbg_atomic(dev, "dm_update_mst_vcpi_slots_for_dsc() failed\n");
12414 goto fail;
12415 }
12416
12417 /*
12418 * Perform validation of MST topology in the state:
12419 * We need to perform MST atomic check before calling
12420 * dc_validate_global_state(), or there is a chance
12421 * to get stuck in an infinite loop and hang eventually.
12422 */
12423 ret = drm_dp_mst_atomic_check(state);
12424 if (ret) {
12425 drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
12426 goto fail;
12427 }
12428 status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
12429 if (status != DC_OK) {
12430 drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
12431 dc_status_to_str(status), status);
12432 ret = -EINVAL;
12433 goto fail;
12434 }
12435 } else {
12436 /*
12437 * The commit is a fast update. Fast updates shouldn't change
12438 * the DC context, affect global validation, and can have their
12439 * commit work done in parallel with other commits not touching
12440 * the same resource. If we have a new DC context as part of
12441 * the DM atomic state from validation we need to free it and
12442 * retain the existing one instead.
12443 *
12444 * Furthermore, since the DM atomic state only contains the DC
12445 * context and can safely be annulled, we can free the state
12446 * and clear the associated private object now to free
12447 * some memory and avoid a possible use-after-free later.
12448 */
12449
12450 for (i = 0; i < state->num_private_objs; i++) {
12451 struct drm_private_obj *obj = state->private_objs[i].ptr;
12452
12453 if (obj->funcs == adev->dm.atomic_obj.funcs) {
12454 int j = state->num_private_objs-1;
12455
12456 dm_atomic_destroy_state(obj,
12457 state->private_objs[i].state);
12458
12459 /* If i is not at the end of the array then the
12460 * last element needs to be moved to where i was
12461 * before the array can safely be truncated.
12462 */
12463 if (i != j)
12464 state->private_objs[i] =
12465 state->private_objs[j];
12466
12467 state->private_objs[j].ptr = NULL;
12468 state->private_objs[j].state = NULL;
12469 state->private_objs[j].old_state = NULL;
12470 state->private_objs[j].new_state = NULL;
12471
12472 state->num_private_objs = j;
12473 break;
12474 }
12475 }
12476 }
12477
12478 /* Store the overall update type for use later in atomic check. */
12479 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12480 struct dm_crtc_state *dm_new_crtc_state =
12481 to_dm_crtc_state(new_crtc_state);
12482
12483 /*
12484 * Only allow async flips for fast updates that don't change
12485 * the FB pitch, the DCC state, rotation, mem_type, etc.
12486 */
12487 if (new_crtc_state->async_flip &&
12488 (lock_and_validation_needed ||
12489 amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
12490 drm_dbg_atomic(crtc->dev,
12491 "[CRTC:%d:%s] async flips are only supported for fast updates\n",
12492 crtc->base.id, crtc->name);
12493 ret = -EINVAL;
12494 goto fail;
12495 }
12496
12497 dm_new_crtc_state->update_type = lock_and_validation_needed ?
12498 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
12499 }
12500
12501 /* Must be success */
12502 WARN_ON(ret);
12503
12504 trace_amdgpu_dm_atomic_check_finish(state, ret);
12505
12506 return ret;
12507
12508 fail:
12509 if (ret == -EDEADLK)
12510 drm_dbg_atomic(dev, "Atomic check stopped to avoid deadlock.\n");
12511 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
12512 drm_dbg_atomic(dev, "Atomic check stopped due to signal.\n");
12513 else
12514 drm_dbg_atomic(dev, "Atomic check failed with err: %d\n", ret);
12515
12516 trace_amdgpu_dm_atomic_check_finish(state, ret);
12517
12518 return ret;
12519 }
12520
12521 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
12522 unsigned int offset,
12523 unsigned int total_length,
12524 u8 *data,
12525 unsigned int length,
12526 struct amdgpu_hdmi_vsdb_info *vsdb)
12527 {
12528 bool res;
12529 union dmub_rb_cmd cmd;
12530 struct dmub_cmd_send_edid_cea *input;
12531 struct dmub_cmd_edid_cea_output *output;
12532
12533 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
12534 return false;
12535
12536 memset(&cmd, 0, sizeof(cmd));
12537
12538 input = &cmd.edid_cea.data.input;
12539
12540 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
12541 cmd.edid_cea.header.sub_type = 0;
12542 cmd.edid_cea.header.payload_bytes =
12543 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
12544 input->offset = offset;
12545 input->length = length;
12546 input->cea_total_length = total_length;
12547 memcpy(input->payload, data, length);
12548
12549 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
12550 if (!res) {
12551 drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
12552 return false;
12553 }
12554
12555 output = &cmd.edid_cea.data.output;
12556
12557 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
12558 if (!output->ack.success) {
12559 drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
12560 output->ack.offset);
12561 }
12562 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
12563 if (!output->amd_vsdb.vsdb_found)
12564 return false;
12565
12566 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
12567 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
12568 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
12569 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
12570 } else {
12571 drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
12572 return false;
12573 }
12574
12575 return true;
12576 }
12577
12578 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
12579 u8 *edid_ext, int len,
12580 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12581 {
12582 int i;
12583
12584 /* send extension block to DMCU for parsing */
12585 for (i = 0; i < len; i += 8) {
12586 bool res;
12587 int offset;
12588
12589 /* send 8 bytes at a time */
12590 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
12591 return false;
12592
12593 if (i+8 == len) {
12594 /* entire EDID block sent, expect result */
12595 int version, min_rate, max_rate;
12596
12597 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
12598 if (res) {
12599 /* amd vsdb found */
12600 vsdb_info->freesync_supported = 1;
12601 vsdb_info->amd_vsdb_version = version;
12602 vsdb_info->min_refresh_rate_hz = min_rate;
12603 vsdb_info->max_refresh_rate_hz = max_rate;
12604 return true;
12605 }
12606 /* not amd vsdb */
12607 return false;
12608 }
12609
12610 /* check for ack */
12611 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
12612 if (!res)
12613 return false;
12614 }
12615
12616 return false;
12617 }
12618
12619 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
12620 u8 *edid_ext, int len,
12621 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12622 {
12623 int i;
12624
12625 /* send extension block to DMUB for parsing */
12626 for (i = 0; i < len; i += 8) {
12627 /* send 8 bytes at a time */
12628 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
12629 return false;
12630 }
12631
12632 return vsdb_info->freesync_supported;
12633 }
12634
12635 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
12636 u8 *edid_ext, int len,
12637 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12638 {
12639 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
12640 bool ret;
12641
12642 mutex_lock(&adev->dm.dc_lock);
12643 if (adev->dm.dmub_srv)
12644 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
12645 else
12646 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
12647 mutex_unlock(&adev->dm.dc_lock);
12648 return ret;
12649 }
12650
12651 static void parse_edid_displayid_vrr(struct drm_connector *connector,
12652 const struct edid *edid)
12653 {
12654 u8 *edid_ext = NULL;
12655 int i;
12656 int j = 0;
12657 u16 min_vfreq;
12658 u16 max_vfreq;
12659
12660 if (edid == NULL || edid->extensions == 0)
12661 return;
12662
12663 /* Find DisplayID extension */
12664 for (i = 0; i < edid->extensions; i++) {
12665 edid_ext = (void *)(edid + (i + 1));
12666 if (edid_ext[0] == DISPLAYID_EXT)
12667 break;
12668 }
12669
12670 if (edid_ext == NULL)
12671 return;
12672
12673 while (j < EDID_LENGTH) {
12674 /* Get dynamic video timing range from DisplayID if available */
12675 if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
12676 (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
12677 min_vfreq = edid_ext[j+9];
12678 if (edid_ext[j+1] & 7)
12679 max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
12680 else
12681 max_vfreq = edid_ext[j+10];
12682
12683 if (max_vfreq && min_vfreq) {
12684 connector->display_info.monitor_range.max_vfreq = max_vfreq;
12685 connector->display_info.monitor_range.min_vfreq = min_vfreq;
12686
12687 return;
12688 }
12689 }
12690 j++;
12691 }
12692 }
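
/*
 * Byte layout as parsed above (for illustration): the loop looks for a block
 * whose tag byte is 0x25 with a payload length of 9, takes the minimum
 * vertical refresh from byte j+9 and the maximum from byte j+10, adding two
 * high-order bits from byte j+11 when the low bits of byte j+1 are set. A
 * hypothetical 48-120 Hz panel would therefore report roughly:
 *
 *	edid_ext[j]      == 0x25;	// block tag
 *	edid_ext[j + 2]  == 9;		// payload length
 *	edid_ext[j + 9]  == 48;		// min_vfreq
 *	edid_ext[j + 10] == 120;	// max_vfreq fits in a single byte here
 */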
12693
12694 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
12695 const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
12696 {
12697 u8 *edid_ext = NULL;
12698 int i;
12699 int j = 0;
12700
12701 if (edid == NULL || edid->extensions == 0)
12702 return -ENODEV;
12703
12704 /* Find DisplayID extension */
12705 for (i = 0; i < edid->extensions; i++) {
12706 edid_ext = (void *)(edid + (i + 1));
12707 if (edid_ext[0] == DISPLAYID_EXT)
12708 break;
12709 }
12710
12711 while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
12712 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
12713 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
12714
12715 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
12716 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
12717 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
12718 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
12719 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
12720
12721 return true;
12722 }
12723 j++;
12724 }
12725
12726 return false;
12727 }
12728
12729 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
12730 const struct edid *edid,
12731 struct amdgpu_hdmi_vsdb_info *vsdb_info)
12732 {
12733 u8 *edid_ext = NULL;
12734 int i;
12735 bool valid_vsdb_found = false;
12736
12737 /*----- drm_find_cea_extension() -----*/
12738 /* No EDID or EDID extensions */
12739 if (edid == NULL || edid->extensions == 0)
12740 return -ENODEV;
12741
12742 /* Find CEA extension */
12743 for (i = 0; i < edid->extensions; i++) {
12744 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
12745 if (edid_ext[0] == CEA_EXT)
12746 break;
12747 }
12748
12749 if (i == edid->extensions)
12750 return -ENODEV;
12751
12752 /*----- cea_db_offsets() -----*/
12753 if (edid_ext[0] != CEA_EXT)
12754 return -ENODEV;
12755
12756 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
12757
12758 return valid_vsdb_found ? i : -ENODEV;
12759 }
12760
12761 /**
12762 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
12763 *
12764 * @connector: Connector to query.
12765 * @drm_edid: DRM EDID from monitor
12766 *
12767 * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
12768 * track of some of the display information in the internal data struct used by
12769 * amdgpu_dm. This function checks which type of connector we are dealing with in
12770 * order to set the FreeSync parameters appropriately.
12771 */
12772 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
12773 const struct drm_edid *drm_edid)
12774 {
12775 int i = 0;
12776 struct amdgpu_dm_connector *amdgpu_dm_connector =
12777 to_amdgpu_dm_connector(connector);
12778 struct dm_connector_state *dm_con_state = NULL;
12779 struct dc_sink *sink;
12780 struct amdgpu_device *adev = drm_to_adev(connector->dev);
12781 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
12782 const struct edid *edid;
12783 bool freesync_capable = false;
12784 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
12785
12786 if (!connector->state) {
12787 drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
12788 goto update;
12789 }
12790
12791 sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	drm_edid_connector_update(connector, drm_edid);

	if (!drm_edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
		goto update;

	edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()

	/* Some eDP panels only have the refresh rate range info in DisplayID */
	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
	     connector->display_info.monitor_range.max_vfreq == 0))
		parse_edid_displayid_vrr(connector, edid);

	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		if (amdgpu_dm_connector->dc_link &&
		    amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
			amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
			amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}

		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);

		if (vsdb_info.replay_mode) {
			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
		}

	} else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	if (amdgpu_dm_connector->dc_link)
		as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {

			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
	    amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
		amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
		amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
	}

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

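/*
 * Apply the current force_timing_sync setting to every active stream and
 * re-trigger CRTC synchronization, holding the DC lock while the current
 * state is updated.
 */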
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

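/*
 * Bring DC out of IPS (idle power states) before register access, but only
 * when a DMUB server is present and no idle exit is already outstanding.
 */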
static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
{
	if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
		dc_exit_ips_for_hw_access(dc);
}

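/*
 * Register write hook used by DC: make sure the hardware is out of IPS,
 * perform the write through CGS and emit a tracepoint for debugging.
 */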
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write; address = 0\n");
		return;
	}
#endif

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

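/*
 * Register read hook used by DC. Reads are rejected while a DMUB register
 * offload gather is in progress (unless burst writes are allowed); otherwise
 * exit IPS, read the register through CGS and trace the access.
 */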
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	amdgpu_dm_exit_ips_for_hw_access(ctx->dc);

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

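/*
 * Submit a DPIA AUX transfer to DMUB and wait (up to 10 seconds) for the
 * completion signalled by the DMUB notification handler. Returns the number
 * of reply bytes on success or -1 on failure, with the detailed status
 * reported through @operation_result.
 */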
int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
			drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = p_notify->result;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
	if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
		/* The reply is stored in the top nibble of the command. */
		payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;

	/* A write request may also return a byte count indicating a partial write. */
	if (p_notify->aux_reply.length)
		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

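/* Fire-and-forget DMUB command that aborts an outstanding fused I/O request. */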
static void abort_fused_io(
		struct dc_context *ctx,
		const struct dmub_cmd_fused_request *request
)
{
	union dmub_rb_cmd command = { 0 };
	struct dmub_rb_cmd_fused_io *io = &command.fused_io;

	io->header.type = DMUB_CMD__FUSED_IO;
	io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
	io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
	io->request = *request;
	dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

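/*
 * Submit a list of fused I/O commands to DMUB and wait for the asynchronous
 * reply on the DDC line's completion. Replies for other identifiers are
 * discarded; if no matching reply arrives within @timeout_us, the request is
 * marked as timed out and aborted.
 */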
static bool execute_fused_io(
		struct amdgpu_device *dev,
		struct dc_context *ctx,
		union dmub_rb_cmd *commands,
		uint8_t count,
		uint32_t timeout_us
)
{
	const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;

	if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
		return false;

	struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
	struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
	const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			&& first->header.ret_status
			&& first->request.status == FUSED_REQUEST_STATUS_SUCCESS;

	if (!result)
		return false;

	while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
		reinit_completion(&sync->replied);

		struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;

		static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");

		if (reply->identifier == first->request.identifier) {
			first->request = *reply;
			return true;
		}
	}

	reinit_completion(&sync->replied);
	first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
	abort_fused_io(ctx, &first->request);
	return false;
}

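/* Serialized wrapper around execute_fused_io(), guarded by the DPIA AUX lock. */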
bool amdgpu_dm_execute_fused_io(
		struct amdgpu_device *dev,
		struct dc_link *link,
		union dmub_rb_cmd *commands,
		uint8_t count,
		uint32_t timeout_us)
{
	struct amdgpu_display_manager *dm = &dev->dm;

	mutex_lock(&dm->dpia_aux_lock);

	const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);

	mutex_unlock(&dm->dpia_aux_lock);
	return result;
}

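/*
 * Send a DPIA SET_CONFIG request through DMUB and wait for it to complete,
 * either synchronously or via the dmub_aux_transfer_done completion.
 * Returns 0 on success; the SET_CONFIG status is reported through
 * @operation_result.
 */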
int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

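/* Thin DM wrapper that runs a single DMUB command with the requested wait type. */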
bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

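/* Thin DM wrapper that runs a list of DMUB commands with the requested wait type. */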
bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}

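/*
 * Placeholder for the ACPI PHY transition interlock handshake; intentionally
 * a no-op until the platform interface is implemented.
 */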
void dm_acpi_process_phy_transition_interlock(
	const struct dc_context *ctx,
	struct dm_process_phy_transition_init_params process_phy_transition_init_params)
{
	// Not yet implemented
}
