// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* vmw_surface_sync(): make the surface's backing store CPU-coherent. */
	struct vmw_bo *bo = surf->res.guest_memory_bo;

	vmw_resource_clean(&surf->res);

	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
		goto done;
	}
	/* ... */
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
		ttm_bo_unreserve(&bo->tbo);
		goto done;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&bo->tbo);
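
	/*
	 * Note (added commentary): this is the usual reserve/fence/wait/
	 * unreserve read-back sequence - reserve the backing TTM buffer,
	 * fence the outstanding GPU commands, wait on that fence
	 * uninterruptibly (the second argument to dma_fence_wait() is
	 * intr == false), then drop the reservation. Only after this is
	 * the CPU view of the surface safe to checksum.
	 */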

/* compute_crc() */
	struct vmw_bo *bo = surf->res.guest_memory_bo;
	const SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(surf->metadata.format);
	/* ... */
	vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
	row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
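
	/*
	 * Illustrative arithmetic (values assumed, not from the driver):
	 * for a linear 32bpp format at 1920x1080 one block is one pixel,
	 * so blocks.width == 1920 and desc->pitchBytesPerBlock == 4,
	 * giving row_pitch_bytes == 7680. The worker then feeds the
	 * mapped surface to the CRC one such row at a time.
	 */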

/* crc_generate_worker() */
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	/* ... */
	spin_lock_irq(&du->vkms.crc_state_lock);
	crc_pending = du->vkms.crc_pending;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/* A previous run already consumed the pending request; nothing to do. */
	if (!crc_pending)
		return;

	spin_lock_irq(&du->vkms.crc_state_lock);
	surf = vmw_surface_reference(du->vkms.surface);
	spin_unlock_irq(&du->vkms.crc_state_lock);

	if (vmw_surface_sync(vmw, surf)) {
		drm_warn(
			crtc->dev,
			/* ... */);
		return;
	}
	/* ... */
	spin_lock_irq(&du->vkms.crc_state_lock);
	frame_start = du->vkms.frame_start;
	frame_end = du->vkms.frame_end;
	du->vkms.frame_start = 0;
	du->vkms.frame_end = 0;
	du->vkms.crc_pending = false;
	spin_unlock_irq(&du->vkms.crc_state_lock);
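
	/*
	 * Note (added commentary): the frame range and the pending flag
	 * are snapshotted and reset in one critical section, so the
	 * vblank handler can immediately start accumulating a new range
	 * while this worker reports the CRC for the frames it just
	 * claimed (one drm_crtc_add_crc_entry() per frame in the full
	 * driver).
	 */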

/* vmw_vkms_vblank_simulate(): hrtimer callback standing in for a vblank IRQ. */
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	/* ... */
	bool locked, ret;

	ret_overrun = hrtimer_forward_now(&du->vkms.timer,
					  du->vkms.period_ns);
	if (ret_overrun != 1)
		drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
			       ret_overrun - 1);

	locked = vmw_vkms_vblank_trylock(crtc);
	/* ... */
	if (!locked)
		return HRTIMER_RESTART;
	has_surface = du->vkms.surface != NULL;
	vmw_vkms_unlock(crtc);

	if (du->vkms.crc_enabled && has_surface) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		spin_lock(&du->vkms.crc_state_lock);
		if (!du->vkms.crc_pending)
			du->vkms.frame_start = frame;
		else
			drm_dbg_driver(crtc->dev,
				       /* ... */
				       du->vkms.frame_start, frame);
		du->vkms.frame_end = frame;
		du->vkms.crc_pending = true;
		spin_unlock(&du->vkms.crc_state_lock);

		ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
		if (!ret)
			drm_dbg_driver(crtc->dev, "Composer worker already queued\n");
	}
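
	/*
	 * Note (added commentary): queue_work() returns false when the
	 * work item is already pending, so a slow CRC worker is never
	 * queued twice - the handler just keeps extending frame_end and
	 * the worker reports a wider frame range when it finally runs.
	 */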

/* vmw_vkms_init() */
	const size_t max_buf_len = sizeof(buffer) - 1;
	/* ... */
	vmw->vkms_enabled = false;
	/* ... */
	ret = kstrtobool(buffer, &vmw->vkms_enabled);
	if (!ret && vmw->vkms_enabled) {
		ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
		vmw->vkms_enabled = (ret == 0);
	}

	vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
	if (!vmw->crc_workq) {
		drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.");
		vmw->vkms_enabled = false;
	}
	if (vmw->vkms_enabled)
		drm_info(&vmw->drm, "VKMS enabled\n");
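
	/*
	 * Note (added commentary): kstrtobool(), used above to parse the
	 * enable toggle, accepts the usual boolean spellings - "1"/"0",
	 * "y"/"n" (either case) and "on"/"off" - so any of them turns
	 * VKMS on or off.
	 */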

/* vmw_vkms_cleanup() */
	destroy_workqueue(vmw->crc_workq);

/* vmw_vkms_get_vblank_timestamp() */
	struct drm_device *dev = crtc->dev;
	/* ... */
	unsigned int pipe = crtc->index;
	/* ... */
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!vmw->vkms_enabled)
		return false;

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(du->vkms.timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= du->vkms.period_ns;
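
	/*
	 * Worked example of the correction (illustrative numbers): at
	 * 60 Hz, period_ns is roughly 16666667 ns, so the timestamp
	 * handed back is the timer's next expiry minus one ~16.7 ms
	 * frame, i.e. the vblank that was just handled rather than the
	 * upcoming one.
	 */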

/* vmw_vkms_enable_vblank() */
	struct drm_device *dev = crtc->dev;
	/* ... */
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	/* ... */
	if (!vmw->vkms_enabled)
		return -EINVAL;

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	du->vkms.timer.function = &vmw_vkms_vblank_simulate;
	du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
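
	/*
	 * Note (added commentary): framedur_ns is filled in by
	 * drm_calc_timestamping_constants() from the current mode's
	 * pixel clock and totals, so the simulated vblank period tracks
	 * whatever mode is currently set on the crtc.
	 */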

/* vmw_vkms_disable_vblank() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	hrtimer_cancel(&du->vkms.timer);
	du->vkms.surface = NULL;
	du->vkms.period_ns = ktime_set(0, 0);

/* vmw_vkms_crtc_init() */
	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
	spin_lock_init(&du->vkms.crc_state_lock);

	INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
	du->vkms.surface = NULL;

/* vmw_vkms_crtc_cleanup() */
	if (du->vkms.surface)
		vmw_surface_unreference(&du->vkms.surface);
	WARN_ON(work_pending(&du->vkms.crc_generator_work));
	hrtimer_cancel(&du->vkms.timer);

/* vmw_vkms_crtc_atomic_begin() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		vmw_vkms_modeset_lock(crtc);

/* vmw_vkms_crtc_atomic_flush() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}

/* vmw_vkms_crtc_atomic_enable() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_on(crtc);

/* vmw_vkms_crtc_atomic_disable() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_off(crtc);

/* is_crc_supported(): CRC capture requires VKMS and a screen-target unit. */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return false;

	if (vmw->active_display_unit != vmw_du_screen_target)
		return false;

	return true;

/* crc_parse_source(): any unrecognized source name yields -EINVAL. */
		ret = -EINVAL;

/* vmw_vkms_verify_crc_source() */
	if (!is_crc_supported(crtc))
		return -EINVAL;

	if (crc_parse_source(src_name, &enabled)) {
		drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
		return -EINVAL;
	}

/* vmw_vkms_set_crc_source() */
	bool enabled, prev_enabled, locked;
	/* ... */
	if (!is_crc_supported(crtc))
		return -EINVAL;
	/* ... */
	locked = vmw_vkms_modeset_lock_relaxed(crtc);
	prev_enabled = du->vkms.crc_enabled;
	du->vkms.crc_enabled = enabled;
	if (locked)
		vmw_vkms_unlock(crtc);

/* vmw_vkms_set_crc_surface() */
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled && du->vkms.surface != surf) {
		WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
		if (du->vkms.surface)
			vmw_surface_unreference(&du->vkms.surface);
		if (surf)
			du->vkms.surface = vmw_surface_reference(surf);
	}

/**
 * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
 * ...
 */
	s64 nsecs = ktime_to_ns(du->vkms.period_ns);
	/* ... */

/**
 * vmw_vkms_modeset_lock - Protects access to crtc during modeset
 * ...
 * Normally this would have been done with a spinlock, but locking the
 * entire atomic modeset with vmwgfx is impossible because kms prepare
 * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
 * guard various bits of state). Which means that we need to synchronize
 * atomic context (the vblank handler) with the non-atomic entirety
 * of kms - so use an atomic_t to track which part of vkms has access
 * to the basic vkms state.
 */
	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_MODESET);
	/* ... */
	drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
		 total_delay, ret, atomic_read(&du->vkms.atomic_lock));
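
	/*
	 * Note (added commentary): atomic_cmpxchg() returns the value
	 * previously held in atomic_lock, so acquisition succeeded
	 * exactly when the return value is VMW_VKMS_LOCK_UNLOCKED;
	 * anything else means the other side still holds the lock and
	 * the caller retries for a bounded time (or, in the trylock
	 * variant, gives up immediately).
	 */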

/**
 * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
 * ...
 * Returns true if actually locked vkms to modeset or false otherwise.
 */
	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_MODESET);
	/* ... */
	drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");

/**
 * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
 * ...
 * Returns true if locked vkms to vblank or false otherwise.
 */
	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_VBLANK);

/* vmw_vkms_unlock() */
	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
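
/*
 * Distilled user-space sketch of the tri-state lock used above, assuming
 * only what the fragments show: three states, cmpxchg to acquire, plain
 * store to release. All names here (and main()) are illustrative, not
 * part of vmwgfx.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { LOCK_UNLOCKED, LOCK_MODESET, LOCK_VBLANK };

static _Atomic int vkms_lock = LOCK_UNLOCKED;

/* Atomic-context side: give up immediately if the slow path holds it. */
static bool vblank_trylock(void)
{
	int expected = LOCK_UNLOCKED;

	return atomic_compare_exchange_strong(&vkms_lock, &expected,
					      LOCK_VBLANK);
}

/* Sleepable side: spin until the handler is out (bounded in the driver). */
static void modeset_lock(void)
{
	int expected;

	do {
		expected = LOCK_UNLOCKED;
	} while (!atomic_compare_exchange_weak(&vkms_lock, &expected,
					       LOCK_MODESET));
}

static void unlock(void)
{
	atomic_store(&vkms_lock, LOCK_UNLOCKED);
}

int main(void)
{
	modeset_lock();
	/* While modeset holds the lock, the vblank side must back off. */
	bool got = vblank_trylock();	/* false here */
	unlock();
	return got ? 1 : 0;
}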