// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <drm/drm_vblank.h>
#include <drm/drm_atomic_helper.h>

#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"

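/*
 * Cadence used by amdgpu_dm_idle_worker(): while idle optimizations are
 * allowed, the worker wakes up every HPD_DETECTION_PERIOD_uS (~2 s) and
 * drops them for HPD_DETECTION_TIME_uS (~100 ms) so that hot-plug
 * detection can still be serviced.
 */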
#define HPD_DETECTION_PERIOD_uS 2000000
#define HPD_DETECTION_TIME_uS 100000

void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Send completion event for cursor-only commits */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

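/*
 * Note: @new_stream and @old_stream are currently unused; a modeset is
 * required whenever the CRTC is active and DRM has flagged the state as
 * needing one.
 */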
bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
				     struct dc_stream_state *new_stream,
				     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_FIXED;
}

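/*
 * Enable/disable the VUPDATE interrupt for the OTG driving this CRTC.
 * VUPDATE is only needed while VRR is active (see
 * amdgpu_dm_crtc_set_vblank()).  A CRTC without an assigned OTG
 * (otg_inst == -1) is silently skipped.
 */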
int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features.
 * @dm: amdgpu display manager instance.
 * @acrtc: CRTC whose panel self-refresh state is being updated.
 * @stream: DC stream associated with @acrtc.
 * @vblank_enabled: Whether the DRM vblank counter is currently enabled.
 * @allow_sr_entry: Whether entry into self-refresh mode is allowed.
 *
 * The DRM vblank counter enable/disable action is used as the trigger to enable
 * or disable various panel self-refresh features:
 *
 * Panel Replay and PSR SU
 * - Enable when:
 *   - VRR is disabled
 *   - vblank counter is disabled
 *   - entry is allowed: usermode demonstrates an adequate number of fast
 *     commits
 *   - CRC capture window isn't active
 * - Keep enabled even when vblank counter gets enabled
 *
 * PSR1
 * - Enable condition same as above
 * - Disable when vblank counter is enabled
 */
void amdgpu_dm_crtc_set_panel_sr_feature(
	struct amdgpu_display_manager *dm,
	struct amdgpu_crtc *acrtc,
	struct dc_stream_state *stream,
	bool vblank_enabled, bool allow_sr_entry)
{
	struct dc_link *link = stream->link;
	bool is_sr_active = (link->replay_settings.replay_allow_active ||
			     link->psr_settings.psr_allow_active);
	bool is_crc_window_active = false;
	bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	is_crc_window_active =
		amdgpu_dm_crc_window_is_activated(&acrtc->base);
#endif

	if (link->replay_settings.replay_feature_enabled && !vrr_active &&
	    allow_sr_entry && !is_sr_active && !is_crc_window_active) {
		amdgpu_dm_replay_enable(stream, true);
	} else if (vblank_enabled) {
		if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
			amdgpu_dm_psr_disable(stream, false);
	} else if (link->psr_settings.psr_feature_enabled && !vrr_active &&
		   allow_sr_entry && !is_sr_active && !is_crc_window_active) {

		struct amdgpu_dm_connector *aconn =
			(struct amdgpu_dm_connector *) stream->dm_stream_context;

		if (!aconn->disallow_edp_enter_psr) {
			amdgpu_dm_psr_enable(stream);
			if (dm->idle_workqueue &&
			    (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) &&
			    dm->dc->idle_optimizations_allowed &&
			    dm->idle_workqueue->enable &&
			    !dm->idle_workqueue->running)
				schedule_work(&dm->idle_workqueue->work);
		}
	}
}

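/*
 * A device is considered headless when no connector other than writeback
 * connectors reports a connected status.
 */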
bool amdgpu_dm_is_headless(struct amdgpu_device *adev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_device *dev;
	bool is_headless = true;

	if (adev == NULL)
		return true;

	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		if (connector->status == connector_status_connected) {
			is_headless = false;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);
	return is_headless;
}

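/*
 * Idle worker: while enabled, periodically (every HPD_DETECTION_PERIOD_uS)
 * drops idle optimizations for HPD_DETECTION_TIME_uS so hot-plug detection
 * can run, then re-enters idle.  The loop stops once idle optimizations are
 * no longer allowed, or when displays are attached but PSR is not allowed
 * to be active.
 */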
static void amdgpu_dm_idle_worker(struct work_struct *work)
{
	struct idle_workqueue *idle_work;

	idle_work = container_of(work, struct idle_workqueue, work);
	idle_work->dm->idle_workqueue->running = true;

	while (idle_work->enable) {
		fsleep(HPD_DETECTION_PERIOD_uS);
		mutex_lock(&idle_work->dm->dc_lock);
		if (!idle_work->dm->dc->idle_optimizations_allowed) {
			mutex_unlock(&idle_work->dm->dc_lock);
			break;
		}
		dc_allow_idle_optimizations(idle_work->dm->dc, false);

		mutex_unlock(&idle_work->dm->dc_lock);
		fsleep(HPD_DETECTION_TIME_uS);
		mutex_lock(&idle_work->dm->dc_lock);

		if (!amdgpu_dm_is_headless(idle_work->dm->adev) &&
		    !amdgpu_dm_psr_is_active_allowed(idle_work->dm)) {
			mutex_unlock(&idle_work->dm->dc_lock);
			break;
		}

		if (idle_work->enable) {
			dc_post_update_surfaces_to_stream(idle_work->dm->dc);
			dc_allow_idle_optimizations(idle_work->dm->dc, true);
		}
		mutex_unlock(&idle_work->dm->dc_lock);
	}
	idle_work->dm->idle_workqueue->running = false;
}

struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev)
{
	struct idle_workqueue *idle_work;

	idle_work = kzalloc_obj(*idle_work);
	if (ZERO_OR_NULL_PTR(idle_work))
		return NULL;

	idle_work->dm = &adev->dm;
	idle_work->enable = false;
	idle_work->running = false;
	INIT_WORK(&idle_work->work, amdgpu_dm_idle_worker);

	return idle_work;
}

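/*
 * Runs from dm->vblank_control_workqueue.  Adjusts dm->active_vblank_irq_count
 * and asks the per-CRTC idle state machine to exit (on enable) or enter (on
 * disable) idle, then drops the stream reference taken when the work was
 * queued.
 */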
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable) {
		dm->active_vblank_irq_count++;
		amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism,
					   DM_ISM_EVENT_EXIT_IDLE_REQUESTED);
	} else {
		if (dm->active_vblank_irq_count > 0)
			dm->active_vblank_irq_count--;
		amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism,
					   DM_ISM_EVENT_ENTER_IDLE_REQUESTED);
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

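/*
 * Core vblank enable/disable path.  Besides the vblank interrupt itself this
 * also toggles the VUPDATE interrupt (VRR only), the pageflip interrupt and,
 * for secure display, the vline0 interrupt, and queues
 * amdgpu_dm_crtc_vblank_control_worker() to update the idle state handling.
 */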
static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct vblank_control_work *work;
	int irq_type;
	int rc = 0;

	if (enable && !acrtc->base.enabled) {
		drm_dbg_vbl(crtc->dev,
			    "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
			    acrtc->crtc_id, acrtc->base.enabled);
		return -EINVAL;
	}

	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	if (enable) {
		struct dc *dc = adev->dm.dc;
		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
		struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
		struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
		bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
				    pr->config.replay_supported;

		/*
		 * IPS and the self-refresh features can reset the vblank counter
		 * between vblank disable and enable, which can leave the system
		 * stuck waiting for a vblank count that never arrives.  Estimate
		 * the missed vblanks from timestamps and update the DRM vblank
		 * counter accordingly.
		 */
		if (dc->caps.ips_support &&
		    dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
		    sr_supported && vblank->config.disable_immediate)
			drm_crtc_vblank_restore(crtc);
	}

	if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
		if (enable) {
			/* vblank irq on -> Only need vupdate irq in vrr mode */
			if (amdgpu_dm_crtc_vrr_active(acrtc_state))
				rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
		} else {
			/* vblank irq off -> vupdate irq off */
			rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
		}
	}

	if (rc)
		return rc;

	/* crtc vblank or vstartup interrupt */
	if (enable) {
		rc = amdgpu_irq_get(adev, &adev->crtc_irq, irq_type);
		drm_dbg_vbl(crtc->dev, "Get crtc_irq ret=%d\n", rc);
	} else {
		rc = amdgpu_irq_put(adev, &adev->crtc_irq, irq_type);
		drm_dbg_vbl(crtc->dev, "Put crtc_irq ret=%d\n", rc);
	}

	if (rc)
		return rc;

	/*
	 * hubp surface flip interrupt
	 *
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	if (enable) {
		rc = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);
		drm_dbg_vbl(crtc->dev, "Get pageflip_irq ret=%d\n", rc);
	} else {
		rc = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);
		drm_dbg_vbl(crtc->dev, "Put pageflip_irq ret=%d\n", rc);
	}

	if (rc)
		return rc;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* crtc vline0 interrupt, only available on DCN+ */
	if (amdgpu_ip_version(adev, DCE_HWIP, 0) != 0) {
		if (enable) {
			rc = amdgpu_irq_get(adev, &adev->vline0_irq, irq_type);
			drm_dbg_vbl(crtc->dev, "Get vline0_irq ret=%d\n", rc);
		} else {
			rc = amdgpu_irq_put(adev, &adev->vline0_irq, irq_type);
			drm_dbg_vbl(crtc->dev, "Put vline0_irq ret=%d\n", rc);
		}

		if (rc)
			return rc;
	}
#endif

	if (amdgpu_in_reset(adev))
		return 0;

	if (dm->vblank_control_workqueue) {
		work = kzalloc_obj(*work, GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;

		if (acrtc_state->stream) {
			dc_stream_retain(acrtc_state->stream);
			work->stream = acrtc_state->stream;
		}

		queue_work(dm->vblank_control_workqueue, &work->work);
	}

	return 0;
}

int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	return amdgpu_dm_crtc_set_vblank(crtc, true);
}

void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	amdgpu_dm_crtc_set_vblank(crtc, false);
}

static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc,
					 struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy the dc_stream here once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc_obj(*state);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	state->regamma_tf = cur->regamma_tf;
	state->crc_skip_count = cur->crc_skip_count;
	state->mpo_requested = cur->mpo_requested;
	state->cursor_mode = cur->cursor_mode;
	/* TODO: duplicate the dc_stream here once the stream object is flattened */

	return &state->base;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	/*
	 * amdgpu_dm_ism_fini() is intentionally called in amdgpu_dm_fini().
	 * It must be called before dc_destroy() in amdgpu_dm_fini()
	 * to avoid ISM accessing an invalid dc handle once dc is released.
	 */

	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		amdgpu_dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc_obj(*state);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

#ifdef AMD_PRIVATE_COLOR
/**
 * dm_crtc_additional_color_mgmt - enable additional color properties
 * @crtc: DRM CRTC
 *
 * This function lets the driver enable the post-blending CRTC regamma transfer
 * function property in addition to the DRM CRTC gamma LUT. The default value
 * means a linear transfer function, which is also the default CRTC gamma LUT
 * behaviour without this property.
 */
static void
dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);

	if (adev->dm.dc->caps.color.mpc.ogam_ram)
		drm_object_attach_property(&crtc->base,
					   adev->mode_info.regamma_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
}

static int
amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc,
				   struct drm_crtc_state *state,
				   struct drm_property *property,
				   uint64_t val)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);

	if (property == adev->mode_info.regamma_tf_property) {
		if (acrtc_state->regamma_tf != val) {
			acrtc_state->regamma_tf = val;
			acrtc_state->base.color_mgmt_changed |= 1;
		}
	} else {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
			       crtc->base.id, crtc->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

static int
amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc,
				   const struct drm_crtc_state *state,
				   struct drm_property *property,
				   uint64_t *val)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);

	if (property == adev->mode_info.regamma_tf_property)
		*val = acrtc_state->regamma_tf;
	else
		return -EINVAL;

	return 0;
}
#endif

/* Only the hooks currently needed by the driver are implemented */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = amdgpu_dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state,
	.atomic_destroy_state = amdgpu_dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_dm_crtc_enable_vblank,
	.disable_vblank = amdgpu_dm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
#ifdef AMD_PRIVATE_COLOR
	.atomic_set_property = amdgpu_dm_atomic_crtc_set_property,
	.atomic_get_property = amdgpu_dm_atomic_crtc_get_property,
#endif
};

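/*
 * Intentionally a no-op: CRTC disable is driven through the DC stream
 * programming done in the atomic commit path, so nothing is needed in the
 * legacy helper hook itself.
 */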
static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc,
						     struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state);
}

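/*
 * No CRTC-level mode fixup is required; mode and timing validation happens
 * in DC via dc_validate_stream() from the atomic_check hook below.
 */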
static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
					     const struct drm_display_mode *mode,
					     struct drm_display_mode *adjusted_mode)
{
	return true;
}

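/*
 * Atomic check for the CRTC: recounts the active planes, requires the primary
 * plane whenever the CRTC is enabled, restricts async flips to fast updates,
 * pulls the primary plane into the state for VRR handling and finally lets DC
 * validate the stream.
 */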
static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
					      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state);

	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
			     amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/*
	 * Only allow async flips for fast updates that don't change the FB
	 * pitch, the DCC state, rotation, etc.
	 */
	if (crtc_state->async_flip &&
	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
		struct drm_plane_state *primary_state;

		/* Pull in primary plane for correct VRR handling */
		primary_state = drm_atomic_get_plane_state(state, crtc->primary);
		if (IS_ERR(primary_state))
			return PTR_ERR(primary_state);
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = amdgpu_dm_crtc_helper_disable,
	.atomic_check = amdgpu_dm_crtc_helper_atomic_check,
	.mode_fixup = amdgpu_dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

/*
 * This hysteresis filter as configured will:
 *
 * * Search through the latest 8 [filter_history_size] entries in the history,
 *   skipping entries that are older than [filter_old_history_threshold] frames
 *   (0 means ignore age)
 * * Look for short idle periods that lasted less than 4 [filter_num_frames]
 *   frame-times
 * * If there is at least 1 [filter_entry_count] such short idle period, apply
 *   a delay of 4 [activation_num_delay_frames] frames before allowing idle
 *   optimizations again
 * * Apply an additional delay of 11 [sso_num_frames] frames before enabling
 *   panel-specific optimizations
 *
 * The values were determined empirically on another OS, optimizing for Z8
 * residency on APUs when running a productivity + web browsing test.
 *
 * TODO: Run similar tests to determine if these values are also optimal for
 * Linux, and if each APU generation benefits differently.
 */
static struct amdgpu_dm_ism_config default_ism_config = {
	.filter_num_frames = 4,
	.filter_history_size = 8,
	.filter_entry_count = 1,
	.activation_num_delay_frames = 4,
	.filter_old_history_threshold = 0,
	.sso_num_frames = 11,
};

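/*
 * Creates the cursor plane and the DRM CRTC for one display pipe, registers
 * the color management properties and initializes the per-CRTC idle state
 * machine with the default hysteresis configuration above.
 */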
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct drm_plane *plane,
			uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;
	bool has_degamma;
	int res = -ENOMEM;

	cursor_plane = kzalloc_obj(*cursor_plane);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc_obj(struct amdgpu_crtc);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	amdgpu_dm_ism_init(&acrtc->ism, &default_ism_config);

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the CRTC state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;

	/*
	 * Don't enable the DRM CRTC degamma property for
	 * 1. DCE, since it doesn't support programmable degamma anywhere.
	 * 2. DCN401, since the pre-blending degamma LUT doesn't apply to the cursor.
	 * Note: DEGAMMA properties are created even if the primary plane has the
	 * COLOR_PIPELINE property. User space can use either the DEGAMMA properties
	 * or the COLOR_PIPELINE property. An atomic commit which attempts to enable
	 * both is rejected.
	 */
	has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
		      dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;

	drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
				   true, MAX_COLOR_LUT_ENTRIES);

	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

#ifdef AMD_PRIVATE_COLOR
	dm_crtc_additional_color_mgmt(&acrtc->base);
#endif
	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}