// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_vkms.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include "vmw_surface_cache.h"

#include <drm/drm_crtc.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include <linux/crc32.h>
#include <linux/delay.h>

#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable"

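/*
 * vmw_surface_sync - wait for the device to finish rendering to a surface
 *
 * Cleans the surface resource, then submits and waits on a fence so that
 * the guest-memory buffer holds up-to-date contents before the CRC is
 * computed.
 */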
static int
vmw_surface_sync(struct vmw_private *vmw,
		 struct vmw_surface *surf)
{
	int ret;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_bo *bo = surf->res.guest_memory_bo;

	vmw_resource_clean(&surf->res);

	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
		goto done;
	}

	ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
		ttm_bo_unreserve(&bo->tbo);
		goto done;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&bo->tbo);
done:
	return ret;
}

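/*
 * compute_crc - compute the CRC32 of a surface's backing buffer
 *
 * Maps the guest-memory buffer and accumulates a little-endian CRC32 over
 * each row of pixel blocks. The caller must have synced the surface first.
 */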
static void
compute_crc(struct drm_crtc *crtc,
	    struct vmw_surface *surf,
	    u32 *crc)
{
	u8 *mapped_surface;
	struct vmw_bo *bo = surf->res.guest_memory_bo;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(surf->metadata.format);
	u32 row_pitch_bytes;
	SVGA3dSize blocks;
	u32 y;

	*crc = 0;

	vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
	row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
	WARN_ON(!bo);
	mapped_surface = vmw_bo_map_and_cache(bo);

	for (y = 0; y < blocks.height; y++) {
		*crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
		mapped_surface += row_pitch_bytes;
	}

	vmw_bo_unmap(bo);
}

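/*
 * crc_generate_worker - deferred work computing CRCs for the vblank hrtimer
 *
 * Runs on the ordered vmwgfx_crc_generator workqueue. Syncs and CRCs the
 * currently bound surface, then reports one CRC entry for every frame that
 * elapsed since the work was queued.
 */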
static void
crc_generate_worker(struct work_struct *work)
{
	struct vmw_display_unit *du =
		container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	bool crc_pending;
	u64 frame_start, frame_end;
	u32 crc32 = 0;
	struct vmw_surface *surf = NULL;

	spin_lock_irq(&du->vkms.crc_state_lock);
	crc_pending = du->vkms.crc_pending;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	spin_lock_irq(&du->vkms.crc_state_lock);
	surf = vmw_surface_reference(du->vkms.surface);
	spin_unlock_irq(&du->vkms.crc_state_lock);

	if (surf) {
		if (vmw_surface_sync(vmw, surf)) {
			drm_warn(
				crtc->dev,
				"CRC worker wasn't able to sync the crc surface!\n");
			/* Drop the reference taken above before bailing out. */
			vmw_surface_unreference(&surf);
			return;
		}

		compute_crc(crtc, surf, &crc32);
		vmw_surface_unreference(&surf);
	}

	spin_lock_irq(&du->vkms.crc_state_lock);
	frame_start = du->vkms.frame_start;
	frame_end = du->vkms.frame_end;
	du->vkms.frame_start = 0;
	du->vkms.frame_end = 0;
	du->vkms.crc_pending = false;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

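/*
 * vmw_vkms_vblank_simulate - hrtimer callback emulating the vblank interrupt
 *
 * Forwards the timer by one frame period, signals the vblank to the DRM
 * core and, when CRC generation is enabled and a surface is bound, queues
 * the CRC worker for the frame range that just completed.
 */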
static enum hrtimer_restart
vmw_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	bool has_surface = false;
	u64 ret_overrun;
	bool locked, ret;

	ret_overrun = hrtimer_forward_now(&du->vkms.timer,
					  du->vkms.period_ns);
	if (ret_overrun != 1)
		drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
			       ret_overrun - 1);

	locked = vmw_vkms_vblank_trylock(crtc);
	ret = drm_crtc_handle_vblank(crtc);
	WARN_ON(!ret);
	if (!locked)
		return HRTIMER_RESTART;
	has_surface = du->vkms.surface != NULL;
	vmw_vkms_unlock(crtc);

	if (du->vkms.crc_enabled && has_surface) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		spin_lock(&du->vkms.crc_state_lock);
		if (!du->vkms.crc_pending)
			du->vkms.frame_start = frame;
		else
			drm_dbg_driver(crtc->dev,
				       "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
				       du->vkms.frame_start, frame);
		du->vkms.frame_end = frame;
		du->vkms.crc_pending = true;
		spin_unlock(&du->vkms.crc_state_lock);

		ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
		if (!ret)
			drm_dbg_driver(crtc->dev, "Composer worker already queued\n");
	}

	return HRTIMER_RESTART;
}

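/**
 * vmw_vkms_init - initialize vkms vblank emulation
 * @vmw: The vmw_private device
 *
 * Reads the "guestinfo.vmwgfx.vkms_enable" guestinfo key to decide whether
 * vkms is enabled and, if so, initializes vblank support. The ordered CRC
 * workqueue is allocated in either case; if that allocation fails, vkms is
 * disabled.
 */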
void
vmw_vkms_init(struct vmw_private *vmw)
{
	char buffer[64];
	const size_t max_buf_len = sizeof(buffer) - 1;
	size_t buf_len = max_buf_len;
	int ret;

	vmw->vkms_enabled = false;

	ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
	if (ret || buf_len > max_buf_len)
		return;
	buffer[buf_len] = '\0';

	ret = kstrtobool(buffer, &vmw->vkms_enabled);
	if (!ret && vmw->vkms_enabled) {
		ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
		vmw->vkms_enabled = (ret == 0);
	}

	vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
	if (!vmw->crc_workq) {
		drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.");
		vmw->vkms_enabled = false;
	}
	if (vmw->vkms_enabled)
		drm_info(&vmw->drm, "VKMS enabled\n");
}

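/**
 * vmw_vkms_cleanup - tear down vkms state
 * @vmw: The vmw_private device
 *
 * Destroys the CRC workqueue allocated by vmw_vkms_init().
 */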
void
vmw_vkms_cleanup(struct vmw_private *vmw)
{
	destroy_workqueue(vmw->crc_workq);
}

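/**
 * vmw_vkms_get_vblank_timestamp - compute the timestamp of the last vblank
 * @crtc: The crtc whose vblank is queried
 * @max_error: Maximum acceptable timestamp error
 * @vblank_time: Returned vblank timestamp
 * @in_vblank_irq: True when called from the simulated vblank interrupt
 *
 * Derives the timestamp from the emulated vblank hrtimer. Returns true on
 * success, false when vkms is disabled.
 */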
bool
vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
			      int *max_error,
			      ktime_t *vblank_time,
			      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	unsigned int pipe = crtc->index;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!vmw->vkms_enabled)
		return false;

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(du->vkms.timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= du->vkms.period_ns;

	return true;
}

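/**
 * vmw_vkms_enable_vblank - start vblank emulation on a crtc
 * @crtc: The crtc to enable vblanks on
 *
 * Computes the frame period from the current mode and starts the hrtimer
 * which simulates the vblank interrupt.
 *
 * Returns 0 on success, -EINVAL when vkms is disabled.
 */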
int
vmw_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	if (!vmw->vkms_enabled)
		return -EINVAL;

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	du->vkms.timer.function = &vmw_vkms_vblank_simulate;
	du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);

	return 0;
}

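/**
 * vmw_vkms_disable_vblank - stop vblank emulation on a crtc
 * @crtc: The crtc to disable vblanks on
 *
 * Cancels the vblank hrtimer and clears the emulation state.
 */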
void
vmw_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	hrtimer_cancel(&du->vkms.timer);
	du->vkms.surface = NULL;
	du->vkms.period_ns = ktime_set(0, 0);
}

enum vmw_vkms_lock_state {
	VMW_VKMS_LOCK_UNLOCKED = 0,
	VMW_VKMS_LOCK_MODESET = 1,
	VMW_VKMS_LOCK_VBLANK = 2
};

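/**
 * vmw_vkms_crtc_init - initialize per-crtc vkms state
 * @crtc: The crtc to initialize
 */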
void
vmw_vkms_crtc_init(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
	spin_lock_init(&du->vkms.crc_state_lock);

	INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
	du->vkms.surface = NULL;
}

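/**
 * vmw_vkms_crtc_cleanup - release per-crtc vkms state
 * @crtc: The crtc to clean up
 *
 * Drops the reference on the bound surface and cancels the vblank hrtimer.
 * No CRC work may be pending at this point.
 */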
void
vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	if (du->vkms.surface)
		vmw_surface_unreference(&du->vkms.surface);
	WARN_ON(work_pending(&du->vkms.crc_generator_work));
	hrtimer_cancel(&du->vkms.timer);
}

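/**
 * vmw_vkms_crtc_atomic_begin - take the vkms lock for an atomic commit
 * @crtc: The crtc being committed
 * @state: The atomic state
 *
 * The matching unlock happens in vmw_vkms_crtc_atomic_flush().
 */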
void
vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		vmw_vkms_modeset_lock(crtc);
}

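/**
 * vmw_vkms_crtc_atomic_flush - finish an atomic commit on the crtc
 * @crtc: The crtc being committed
 * @state: The atomic state
 *
 * Sends or arms the pending vblank event and releases the vkms lock taken
 * in vmw_vkms_crtc_atomic_begin().
 */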
void
vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	unsigned long flags;
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}

	vmw_vkms_unlock(crtc);
}

void
vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_on(crtc);
}

void
vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_off(crtc);
}

static bool
is_crc_supported(struct drm_crtc *crtc)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return false;

	if (vmw->active_display_unit != vmw_du_screen_target)
		return false;

	return true;
}

static const char * const pipe_crc_sources[] = {"auto"};

static int
crc_parse_source(const char *src_name,
		 bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

const char *const *
vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
			 size_t *count)
{
	*count = 0;
	if (!is_crc_supported(crtc))
		return NULL;

	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

int
vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
			   const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	if (crc_parse_source(src_name, &enabled) < 0) {
		drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

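/**
 * vmw_vkms_set_crc_source - debugfs hook switching CRC generation on or off
 * @crtc: The crtc on which to enable or disable CRCs
 * @src_name: The requested CRC source ("auto" or NULL)
 *
 * Holds a vblank reference while CRC generation is enabled so the vblank
 * hrtimer keeps running between page flips.
 */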
int
vmw_vkms_set_crc_source(struct drm_crtc *crtc,
			const char *src_name)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool enabled, prev_enabled, locked;
	int ret;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	ret = crc_parse_source(src_name, &enabled);

	if (enabled)
		drm_crtc_vblank_get(crtc);

	locked = vmw_vkms_modeset_lock_relaxed(crtc);
	prev_enabled = du->vkms.crc_enabled;
	du->vkms.crc_enabled = enabled;
	if (locked)
		vmw_vkms_unlock(crtc);

	if (prev_enabled)
		drm_crtc_vblank_put(crtc);

	return ret;
}

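/**
 * vmw_vkms_set_crc_surface - set the surface the CRC worker will read
 * @crtc: The crtc on which the surface is scanned out
 * @surf: The new surface, or NULL
 *
 * Must be called with the vkms lock held for modeset.
 */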
void
vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
			 struct vmw_surface *surf)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled && du->vkms.surface != surf) {
		WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
		if (du->vkms.surface)
			vmw_surface_unreference(&du->vkms.surface);
		if (surf)
			du->vkms.surface = vmw_surface_reference(surf);
	}
}

/**
 * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
 * @du: The vmw_display_unit from which to grab the vblank timings
 *
 * Returns the maximum wait time used to acquire the vkms lock: by default
 * the duration of a single frame, or 1/60th of a second when vblank has
 * not been initialized for the display unit.
 */
static inline u64
vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
{
	s64 nsecs = ktime_to_ns(du->vkms.period_ns);

	return (nsecs > 0) ? nsecs : 16666666;
}

/**
 * vmw_vkms_modeset_lock - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * This function prevents the VKMS timers/callbacks from being called
 * while a modeset operation is in progress. We don't want the callbacks,
 * e.g. the vblank simulator, to be trying to access incomplete state,
 * so we need to make sure they execute only when the modeset has
 * finished.
 *
 * Normally this would have been done with a spinlock but locking the
 * entire atomic modeset with vmwgfx is impossible because kms prepare
 * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
 * guard various bits of state), which means that we need to synchronize
 * atomic context (the vblank handler) with the non-atomic entirety
 * of kms - so use an atomic_t to track which part of vkms has access
 * to the basic vkms state.
 */
void
vmw_vkms_modeset_lock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
			 total_delay, ret, atomic_read(&du->vkms.atomic_lock));
	}
}

/**
 * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * Much like vmw_vkms_modeset_lock except that when the crtc is currently
 * in a modeset it will return immediately.
 *
 * Returns true if vkms was actually locked for modeset, false otherwise.
 */
bool
vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED ||
		    ret == VMW_VKMS_LOCK_MODESET ||
		    total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
		return false;
	}

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

/**
 * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
 * @crtc: The crtc to lock for vkms
 *
 * Tries to lock vkms for vblank, returns immediately.
 *
 * Returns true if vkms was locked for vblank, false otherwise.
 */
bool
vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	u32 ret;

	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_VBLANK);

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

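/**
 * vmw_vkms_unlock - release the vkms lock
 * @crtc: The crtc to unlock
 *
 * Releases a lock taken by vmw_vkms_modeset_lock(),
 * vmw_vkms_modeset_lock_relaxed() or vmw_vkms_vblank_trylock().
 */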
void
vmw_vkms_unlock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	/* Release flag; mark it as unlocked. */
	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
}