// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

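/*
 * Simulated vblank "interrupt": the hrtimer callback fired once per frame.
 * It rolls the timer forward by one period, feeds the DRM vblank machinery
 * and, when the composer is enabled, records the frame interval and queues
 * vkms_composer_worker() on the ordered composer workqueue. Returning
 * HRTIMER_RESTART keeps the timer running until vblanks are disabled.
 */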
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret, fence_cookie;

	fence_cookie = dma_fence_begin_signalling();

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	if (ret_overrun != 1)
		pr_warn("%s: vblank timer overrun\n", __func__);

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/* update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	dma_fence_end_signalling(fence_cookie);

	return HRTIMER_RESTART;
}

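/*
 * Start vblank emulation: arm a CLOCK_MONOTONIC hrtimer whose period is the
 * frame duration derived from the current mode via
 * drm_calc_timestamping_constants().
 */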
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	out->vblank_hrtimer.function = &vkms_vblank_simulate;
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

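/*
 * Report the timestamp of the current/last vblank. Without real scanout
 * hardware the timestamp is derived from the emulation hrtimer: its next
 * expiry minus one frame period.
 */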
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
				      int *max_error, ktime_t *vblank_time,
				      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

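/*
 * Duplicate the driver-private CRTC state for a new atomic commit. Each copy
 * carries its own composer_work item, initialized to run
 * vkms_composer_worker().
 */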
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

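/*
 * Reset the CRTC to a freshly allocated, empty vkms_crtc_state, releasing the
 * previous state (and its active_planes array) first.
 */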
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	.enable_vblank		= vkms_enable_vblank,
	.disable_vblank		= vkms_disable_vblank,
	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
	.get_crc_sources	= vkms_get_crc_sources,
	.set_crc_source		= vkms_set_crc_source,
	.verify_crc_source	= vkms_verify_crc_source,
};

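/*
 * Cache the set of visible planes for this commit: count them in a first
 * pass, then allocate and fill vkms_state->active_planes so the composer
 * worker only iterates planes that actually contribute to the output.
 */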
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/* This lock is held across the atomic commit to block the vblank timer
	 * from scheduling vkms_composer_worker() until the composer state is
	 * updated in vkms_crtc_atomic_flush().
	 */
	spin_lock_irq(&vkms_output->lock);
}

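/*
 * Complete the commit: send or arm the pageflip event, publish the new state
 * to the vblank timer through composer_state, and drop the lock taken in
 * vkms_crtc_atomic_begin().
 */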
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= vkms_crtc_atomic_enable,
	.atomic_disable	= vkms_crtc_atomic_disable,
};

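/**
 * vkms_crtc_init() - Initialize a VKMS CRTC
 * @dev: DRM device
 * @crtc: CRTC to initialize
 * @primary: primary plane to attach to the CRTC
 * @cursor: cursor plane to attach to the CRTC
 *
 * Registers the CRTC with its helper and CRC callbacks, sets up gamma/color
 * management and creates the ordered workqueue used by the composer.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */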
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor)
{
	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
	int ret;

	ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
					 &vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
	if (!vkms_out->composer_workq)
		return -ENOMEM;

	return ret;
}