// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_vkms.h"

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include "vmw_surface_cache.h"

#include <drm/drm_crtc.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include <linux/crc32.h>
#include <linux/delay.h>

#define GUESTINFO_VBLANK  "guestinfo.vmwgfx.vkms_enable"

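/**
 * vmw_surface_sync - Wait for pending operations on a surface to finish
 * @vmw: The vmw_private device
 * @surf: The surface whose guest backing buffer is to be synced
 *
 * Cleans the surface resource, then creates and waits on a fence so that
 * the guest memory backing buffer reflects the current surface contents
 * before it is read back (e.g. by the CRC worker).
 *
 * Returns 0 on success or a negative error code on failure.
 */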
static int
vmw_surface_sync(struct vmw_private *vmw,
		 struct vmw_surface *surf)
{
	int ret;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_bo *bo = surf->res.guest_memory_bo;

	vmw_resource_clean(&surf->res);

	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
		goto done;
	}

	ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
	if (ret != 0) {
		drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
		ttm_bo_unreserve(&bo->tbo);
		goto done;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&bo->tbo);
done:
	return ret;
}

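/**
 * compute_crc - Compute a CRC32 of the surface contents
 * @crtc: The crtc the surface is presented on
 * @surf: The surface to checksum
 * @crc: Where to store the resulting CRC32
 *
 * Maps the surface's guest backing buffer object and accumulates a
 * little-endian CRC32 over every row of blocks.
 *
 * Returns 0 on success or a negative error code on failure.
 */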
static int
compute_crc(struct drm_crtc *crtc,
	    struct vmw_surface *surf,
	    u32 *crc)
{
	u8 *mapped_surface;
	struct vmw_bo *bo = surf->res.guest_memory_bo;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(surf->metadata.format);
	u32 row_pitch_bytes;
	SVGA3dSize blocks;
	u32 y;

	*crc = 0;

	vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
	row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
	/* Bail out instead of dereferencing a surface with no backing store. */
	if (WARN_ON(!bo))
		return -EINVAL;
	mapped_surface = vmw_bo_map_and_cache(bo);

	for (y = 0; y < blocks.height; y++) {
		*crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
		mapped_surface += row_pitch_bytes;
	}

	vmw_bo_unmap(bo);

	return 0;
}

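/**
 * crc_generate_worker - Deferred work that computes the CRC of a frame
 * @work: The crc_generator_work member of a struct vmw_display_unit
 *
 * Runs outside of the vblank hrtimer: syncs and checksums the surface
 * currently being scanned out and adds a CRC entry for every frame
 * spanned since the last run, so the reported CRCs keep up with the
 * vblank counter even when the worker falls behind the hrtimer.
 */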
static void
crc_generate_worker(struct work_struct *work)
{
	struct vmw_display_unit *du =
		container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	bool crc_pending;
	u64 frame_start, frame_end;
	u32 crc32 = 0;
	struct vmw_surface *surf = NULL;
	int ret;

	spin_lock_irq(&du->vkms.crc_state_lock);
	crc_pending = du->vkms.crc_pending;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	spin_lock_irq(&du->vkms.crc_state_lock);
	surf = du->vkms.surface;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/* The surface may have been cleared by a concurrent modeset/disable. */
	if (!surf)
		return;

	if (vmw_surface_sync(vmw, surf)) {
		drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
		return;
	}

	ret = compute_crc(crtc, surf, &crc32);
	if (ret)
		return;

	spin_lock_irq(&du->vkms.crc_state_lock);
	frame_start = du->vkms.frame_start;
	frame_end = du->vkms.frame_end;
	crc_pending = du->vkms.crc_pending;
	du->vkms.frame_start = 0;
	du->vkms.frame_end = 0;
	du->vkms.crc_pending = false;
	spin_unlock_irq(&du->vkms.crc_state_lock);

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

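/**
 * vmw_vkms_vblank_simulate - hrtimer callback emulating a vblank interrupt
 * @timer: The vkms timer of a struct vmw_display_unit
 *
 * Forwards the timer by one frame period and signals the vblank to the
 * DRM core. When CRC generation is enabled, also records the vblank
 * frame interval and queues the CRC worker; if the vkms lock is held
 * by a concurrent modeset, the CRC update is skipped for this frame.
 */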
static enum hrtimer_restart
vmw_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
	struct drm_crtc *crtc = &du->crtc;
	struct vmw_private *vmw = vmw_priv(crtc->dev);
	struct vmw_surface *surf = NULL;
	u64 ret_overrun;
	bool locked, ret;

	ret_overrun = hrtimer_forward_now(&du->vkms.timer,
					  du->vkms.period_ns);
	if (ret_overrun != 1)
		drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n",
			       ret_overrun - 1);

	locked = vmw_vkms_vblank_trylock(crtc);
	ret = drm_crtc_handle_vblank(crtc);
	WARN_ON(!ret);
	if (!locked)
		return HRTIMER_RESTART;
	surf = du->vkms.surface;
	vmw_vkms_unlock(crtc);

	if (du->vkms.crc_enabled && surf) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		spin_lock(&du->vkms.crc_state_lock);
		if (!du->vkms.crc_pending)
			du->vkms.frame_start = frame;
		else
			drm_dbg_driver(crtc->dev,
				       "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
				       du->vkms.frame_start, frame);
		du->vkms.frame_end = frame;
		du->vkms.crc_pending = true;
		spin_unlock(&du->vkms.crc_state_lock);

		ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
		if (!ret)
			drm_dbg_driver(crtc->dev, "crc worker already queued\n");
	}

	return HRTIMER_RESTART;
}

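/**
 * vmw_vkms_init - Initialize vblank emulation (vkms) for the device
 * @vmw: The vmw_private device
 *
 * VKMS is only activated when the "guestinfo.vmwgfx.vkms_enable"
 * guestinfo key is set to a true value. On activation this initializes
 * DRM vblank support and allocates the ordered workqueue used by the
 * CRC generator.
 */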
void
vmw_vkms_init(struct vmw_private *vmw)
{
	char buffer[64];
	const size_t max_buf_len = sizeof(buffer) - 1;
	size_t buf_len = max_buf_len;
	int ret;

	vmw->vkms_enabled = false;

	ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
	if (ret || buf_len > max_buf_len)
		return;
	buffer[buf_len] = '\0';

	ret = kstrtobool(buffer, &vmw->vkms_enabled);
	if (!ret && vmw->vkms_enabled) {
		ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
		vmw->vkms_enabled = (ret == 0);
	}

	vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
	if (!vmw->crc_workq) {
		drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.\n");
		vmw->vkms_enabled = false;
	}
	if (vmw->vkms_enabled)
		drm_info(&vmw->drm, "VKMS enabled\n");
}

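/**
 * vmw_vkms_cleanup - Tear down the vkms state of the device
 * @vmw: The vmw_private device
 */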
void
vmw_vkms_cleanup(struct vmw_private *vmw)
{
	/* The workqueue allocation may have failed in vmw_vkms_init(). */
	if (vmw->crc_workq)
		destroy_workqueue(vmw->crc_workq);
}

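/**
 * vmw_vkms_get_vblank_timestamp - Compute the timestamp of a vblank
 * @crtc: The crtc being queried
 * @max_error: Maximum acceptable timestamp error (unused)
 * @vblank_time: Returns the computed vblank timestamp
 * @in_vblank_irq: True when called from the vblank handler (unused)
 *
 * Derives the timestamp from the expiry of the emulation hrtimer,
 * corrected by one frame period (see the comment in the body).
 *
 * Returns true if vkms is enabled and the timestamp is valid.
 */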
bool
vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
			      int *max_error,
			      ktime_t *vblank_time,
			      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	unsigned int pipe = crtc->index;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!vmw->vkms_enabled)
		return false;

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(du->vkms.timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= du->vkms.period_ns;

	return true;
}

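/**
 * vmw_vkms_enable_vblank - Start vblank emulation on a crtc
 * @crtc: The crtc on which to enable vblanks
 *
 * Arms the vkms hrtimer with a period equal to the frame duration of
 * the current mode.
 *
 * Returns 0 on success or -EINVAL if vkms is not enabled.
 */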
int
vmw_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vmw_private *vmw = vmw_priv(dev);
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	if (!vmw->vkms_enabled)
		return -EINVAL;

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	du->vkms.timer.function = &vmw_vkms_vblank_simulate;
	du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);

	return 0;
}

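/**
 * vmw_vkms_disable_vblank - Stop vblank emulation on a crtc
 * @crtc: The crtc on which to disable vblanks
 */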
void
vmw_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	hrtimer_cancel(&du->vkms.timer);
	du->vkms.surface = NULL;
	du->vkms.period_ns = ktime_set(0, 0);
}

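/*
 * Tri-state lock used to synchronize the vblank hrtimer (atomic context)
 * with modeset processing (non-atomic context), see
 * vmw_vkms_modeset_lock().
 */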
enum vmw_vkms_lock_state {
	VMW_VKMS_LOCK_UNLOCKED     = 0,
	VMW_VKMS_LOCK_MODESET      = 1,
	VMW_VKMS_LOCK_VBLANK       = 2
};

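/**
 * vmw_vkms_crtc_init - Initialize the vkms state of a crtc
 * @crtc: The crtc to initialize
 */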
void
vmw_vkms_crtc_init(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
	spin_lock_init(&du->vkms.crc_state_lock);

	INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
	du->vkms.surface = NULL;
}

void
vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	WARN_ON(work_pending(&du->vkms.crc_generator_work));
	hrtimer_cancel(&du->vkms.timer);
}

void
vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		vmw_vkms_modeset_lock(crtc);
}

void
vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	unsigned long flags;
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}

	vmw_vkms_unlock(crtc);
}

void
vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_on(crtc);
}

void
vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled)
		drm_crtc_vblank_off(crtc);
}

static bool
is_crc_supported(struct drm_crtc *crtc)
{
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (!vmw->vkms_enabled)
		return false;

	if (vmw->active_display_unit != vmw_du_screen_target)
		return false;

	return true;
}

static const char * const pipe_crc_sources[] = {"auto"};

static int
crc_parse_source(const char *src_name,
		 bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

const char *const *
vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
			 size_t *count)
{
	*count = 0;
	if (!is_crc_supported(crtc))
		return NULL;

	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

int
vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
			   const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	if (crc_parse_source(src_name, &enabled) < 0) {
		drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

int
vmw_vkms_set_crc_source(struct drm_crtc *crtc,
			const char *src_name)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool enabled, prev_enabled, locked;
	int ret;

	if (!is_crc_supported(crtc))
		return -EINVAL;

	ret = crc_parse_source(src_name, &enabled);

	if (enabled)
		drm_crtc_vblank_get(crtc);

	locked = vmw_vkms_modeset_lock_relaxed(crtc);
	prev_enabled = du->vkms.crc_enabled;
	du->vkms.crc_enabled = enabled;
	if (locked)
		vmw_vkms_unlock(crtc);

	if (prev_enabled)
		drm_crtc_vblank_put(crtc);

	return ret;
}

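/**
 * vmw_vkms_set_crc_surface - Set the surface used for CRC generation
 * @crtc: The crtc being updated
 * @surf: The surface to checksum on subsequent emulated vblanks
 *
 * Must be called with the vkms lock held for modeset, see
 * vmw_vkms_modeset_lock().
 */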
void
vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
			 struct vmw_surface *surf)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_private *vmw = vmw_priv(crtc->dev);

	if (vmw->vkms_enabled) {
		WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
		du->vkms.surface = surf;
	}
}

/**
 * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
 * @du: The vmw_display_unit from which to grab the vblank timings
 *
 * Returns the maximum wait time used to acquire the vkms lock. By
 * default this is the duration of a single frame; when vblank has not
 * been initialized for the display unit it falls back to 1/60th of a
 * second.
 */
static inline u64
vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
{
	s64 nsecs = ktime_to_ns(du->vkms.period_ns);

	return (nsecs > 0) ? nsecs : 16666666;
}

/**
 * vmw_vkms_modeset_lock - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * This function prevents the VKMS timers/callbacks from being called
 * while a modeset operation is in progress. We don't want the callbacks,
 * e.g. the vblank simulator, to be trying to access incomplete state,
 * so we need to make sure they execute only when the modeset has
 * finished.
 *
 * Normally this would have been done with a spinlock but locking the
 * entire atomic modeset with vmwgfx is impossible because kms prepare
 * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
 * guard various bits of state). This means that we need to synchronize
 * atomic context (the vblank handler) with the non-atomic entirety
 * of kms - so use an atomic_t to track which part of vkms has access
 * to the basic vkms state.
 */
void
vmw_vkms_modeset_lock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
			 total_delay, ret, atomic_read(&du->vkms.atomic_lock));
	}
}

/**
 * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
 * @crtc: The crtc to lock for vkms
 *
 * Much like vmw_vkms_modeset_lock except that when the crtc is currently
 * in a modeset it will return immediately.
 *
 * Returns true if it actually locked vkms for modeset, false otherwise.
 */
bool
vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	const u64 nsecs_delay = 10;
	const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
	u64 total_delay = 0;
	int ret;

	do {
		ret = atomic_cmpxchg(&du->vkms.atomic_lock,
				     VMW_VKMS_LOCK_UNLOCKED,
				     VMW_VKMS_LOCK_MODESET);
		if (ret == VMW_VKMS_LOCK_UNLOCKED ||
		    ret == VMW_VKMS_LOCK_MODESET ||
		    total_delay >= MAX_NSECS_DELAY)
			break;
		ndelay(nsecs_delay);
		total_delay += nsecs_delay;
	} while (1);

	if (total_delay >= MAX_NSECS_DELAY) {
		drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
		return false;
	}

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

/**
 * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
 * @crtc: The crtc to lock for vkms
 *
 * Tries to lock vkms for vblank, returns immediately.
 *
 * Returns true if it locked vkms for vblank, false otherwise.
 */
bool
vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	u32 ret;

	ret = atomic_cmpxchg(&du->vkms.atomic_lock,
			     VMW_VKMS_LOCK_UNLOCKED,
			     VMW_VKMS_LOCK_VBLANK);

	return ret == VMW_VKMS_LOCK_UNLOCKED;
}

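/**
 * vmw_vkms_unlock - Release the vkms lock
 * @crtc: The crtc to unlock
 *
 * Counterpart to vmw_vkms_modeset_lock(), vmw_vkms_modeset_lock_relaxed()
 * and vmw_vkms_vblank_trylock().
 */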
void
vmw_vkms_unlock(struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

	/* Release flag; mark it as unlocked. */
	atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
}