// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
        struct drm_crtc *crtc;

        for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
                if (!crtc->state->active)
                        continue;
                drm_crtc_vblank_get(crtc);
        }
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
        struct drm_crtc *crtc;

        for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
                if (!crtc->state->active)
                        continue;
                drm_crtc_vblank_put(crtc);
        }
}

static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
        int crtc_index;
        struct drm_crtc *crtc;

        for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
                crtc_index = drm_crtc_index(crtc);
                mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
        }
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
        struct drm_crtc *crtc;

        for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
                mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
        unsigned crtc_mask = BIT(crtc_idx);

        trace_msm_atomic_async_commit_start(crtc_mask);

        lock_crtcs(kms, crtc_mask);

        if (!(kms->pending_crtc_mask & crtc_mask)) {
                unlock_crtcs(kms, crtc_mask);
                goto out;
        }

        kms->pending_crtc_mask &= ~crtc_mask;

        kms->funcs->enable_commit(kms);

        vblank_get(kms, crtc_mask);

        /*
         * Flush hardware updates:
         */
        trace_msm_atomic_flush_commit(crtc_mask);
        kms->funcs->flush_commit(kms, crtc_mask);

        /*
         * Wait for flush to complete:
         */
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        vblank_put(kms, crtc_mask);

        kms->funcs->complete_commit(kms, crtc_mask);
        unlock_crtcs(kms, crtc_mask);
        kms->funcs->disable_commit(kms);

out:
        trace_msm_atomic_async_commit_finish(crtc_mask);
}

static void msm_atomic_pending_work(struct kthread_work *work)
{
        struct msm_pending_timer *timer = container_of(work,
                        struct msm_pending_timer, work.work);

        msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
                struct msm_kms *kms, int crtc_idx)
{
        timer->kms = kms;
        timer->crtc_idx = crtc_idx;

        timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
        if (IS_ERR(timer->worker)) {
                int ret = PTR_ERR(timer->worker);

                timer->worker = NULL;
                return ret;
        }
        sched_set_fifo(timer->worker->task);

        msm_hrtimer_work_init(&timer->work, timer->worker,
                              msm_atomic_pending_work,
                              CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

        return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
        if (timer->worker)
                kthread_destroy_worker(timer->worker);
}
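
/*
 * An update can take the async path only when it is a cursor (or other
 * async) plane update touching exactly one crtc, that crtc is active,
 * no connector state changes, and no modeset is required.  Everything
 * else takes the synchronous flush path in msm_atomic_commit_tail().
 */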
static bool can_do_async(struct drm_atomic_state *state,
                struct drm_crtc **async_crtc)
{
        struct drm_connector_state *connector_state;
        struct drm_connector *connector;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i, num_crtcs = 0;

        if (!(state->legacy_cursor_update || state->async_update))
                return false;

        /* any connector change means slow path: */
        for_each_new_connector_in_state(state, connector, connector_state, i)
                return false;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        return false;
                if (!crtc_state->active)
                        return false;
                if (++num_crtcs > 1)
                        return false;
                *async_crtc = crtc;
        }

        return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator, to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned i, mask = 0;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i)
                mask |= drm_crtc_mask(crtc);

        return mask;
}

int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        int i, ret = 0;

        /*
         * FIXME: stop setting allow_modeset and move this check to the DPU
         * driver.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
                    (!old_crtc_state->ctm && new_crtc_state->ctm)) {
                        new_crtc_state->mode_changed = true;
                        state->allow_modeset = true;
                }
        }

        if (kms && kms->funcs && kms->funcs->check_mode_changed)
                ret = kms->funcs->check_mode_changed(kms, state);
        if (ret)
                return ret;

        return drm_atomic_helper_check(dev, state);
}

void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct drm_crtc *async_crtc = NULL;
        unsigned crtc_mask = get_crtc_mask(state);
        bool async = can_do_async(state, &async_crtc);

        trace_msm_atomic_commit_tail_start(async, crtc_mask);

        kms->funcs->enable_commit(kms);

        /*
         * Ensure any previous (potentially async) commit has
         * completed:
         */
        lock_crtcs(kms, crtc_mask);
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        atomic_set(&kms->fault_snapshot_capture, 0);

        /*
         * Now that there is no in-progress flush, prepare the
         * current update:
         */
        if (kms->funcs->prepare_commit)
                kms->funcs->prepare_commit(kms, state);

        /*
         * Push atomic updates down to hardware:
         */
        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, state);

        if (async) {
                struct msm_pending_timer *timer =
                        &kms->pending_timers[drm_crtc_index(async_crtc)];

                /* async updates are limited to single-crtc updates: */
                WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
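
                /*
                 * The flush is deferred to this crtc's hrtimer worker,
                 * which wakes up shortly before the next vblank; any
                 * further async updates that arrive before it fires are
                 * folded into the already pending flush.
                 */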

                /*
                 * Start timer if we don't already have an update pending
                 * on this crtc:
                 */
                if (!(kms->pending_crtc_mask & crtc_mask)) {
                        ktime_t vsync_time, wakeup_time;

                        kms->pending_crtc_mask |= crtc_mask;

                        if (drm_crtc_next_vblank_start(async_crtc, &vsync_time))
                                goto fallback;

                        wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

                        msm_hrtimer_queue_work(&timer->work, wakeup_time,
                                        HRTIMER_MODE_ABS);
                }

                kms->funcs->disable_commit(kms);
                unlock_crtcs(kms, crtc_mask);
                /*
                 * At this point, from drm core's perspective, we
                 * are done with the atomic update, so we can just
                 * go ahead and signal that it is done:
                 */
                drm_atomic_helper_commit_hw_done(state);
                drm_atomic_helper_cleanup_planes(dev, state);

                trace_msm_atomic_commit_tail_finish(async, crtc_mask);

                return;
        }

fallback:
        /*
         * If there is any async flush pending on updated crtcs, fold
         * them into the current flush.
         */
        kms->pending_crtc_mask &= ~crtc_mask;

        vblank_get(kms, crtc_mask);

        /*
         * Flush hardware updates:
         */
        trace_msm_atomic_flush_commit(crtc_mask);
        kms->funcs->flush_commit(kms, crtc_mask);
        unlock_crtcs(kms, crtc_mask);

        /*
         * Wait for flush to complete:
         */
        trace_msm_atomic_wait_flush_start(crtc_mask);
        kms->funcs->wait_flush(kms, crtc_mask);
        trace_msm_atomic_wait_flush_finish(crtc_mask);

        vblank_put(kms, crtc_mask);

        lock_crtcs(kms, crtc_mask);
        kms->funcs->complete_commit(kms, crtc_mask);
        unlock_crtcs(kms, crtc_mask);
        kms->funcs->disable_commit(kms);

        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_cleanup_planes(dev, state);

        trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}