xref: /linux/drivers/gpu/drm/vkms/vkms_crtc.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_helper.h>

#include "vkms_drv.h"

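/*
 * vkms_crtc_handle_vblank_timeout - simulated vblank tick
 *
 * Called by the vblank timer helpers once per emulated vblank period. It
 * signals the vblank to the DRM core and, when the composer is enabled,
 * records the frame counters in the current composer state and queues the
 * composer worker that computes the CRC for that frame.
 */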
static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
	struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
	struct vkms_crtc_state *state;
	bool ret, fence_cookie;

	fence_cookie = dma_fence_begin_signalling();

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank\n");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/*
		 * Update frame_start only if a queued vkms_composer_worker()
		 * has already read the data.
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	dma_fence_end_signalling(fence_cookie);

	return true;
}

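/*
 * vkms_atomic_crtc_duplicate_state - duplicate the CRTC state
 *
 * Copies the base state with the atomic helper and initializes a fresh
 * composer work item; the work_struct cannot be carried over from the
 * old state.
 */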
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

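/*
 * vkms_atomic_crtc_destroy_state - release a CRTC state
 *
 * By the time a state is destroyed its composer work must no longer be
 * queued; the WARN_ON() below catches violations.
 */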
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

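/*
 * vkms_atomic_crtc_reset - reset the CRTC to a blank software state
 *
 * __drm_atomic_helper_crtc_reset() accepts a NULL state, so a failed
 * allocation simply leaves the CRTC without a software state.
 */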
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	DRM_CRTC_VBLANK_TIMER_FUNCS,
	.get_crc_sources	= vkms_get_crc_sources,
	.set_crc_source		= vkms_set_crc_source,
	.verify_crc_source	= vkms_verify_crc_source,
};

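/*
 * vkms_crtc_atomic_check - cache the visible planes for composition
 *
 * Counts the visible planes attached to this CRTC and stores their states
 * in active_planes, so the composer does not have to walk every plane
 * again at composition time.
 */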
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

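/*
 * vkms_crtc_atomic_begin and vkms_crtc_atomic_flush bracket the commit:
 * the output lock taken here is only released in vkms_crtc_atomic_flush(),
 * once the new composer state has been published.
 */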
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__acquires(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/*
	 * This lock is held across the atomic commit to block the vblank timer
	 * from scheduling vkms_composer_worker() until the composer is updated.
	 */
	spin_lock_irq(&vkms_output->lock);
}

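/*
 * vkms_crtc_atomic_flush - finish the commit started in vkms_crtc_atomic_begin()
 *
 * Sends or arms the pending vblank event, publishes the new state to the
 * composer and drops the output lock taken in vkms_crtc_atomic_begin().
 */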
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__releases(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= drm_crtc_vblank_atomic_enable,
	.atomic_disable	= drm_crtc_vblank_atomic_disable,
	.handle_vblank_timeout = vkms_crtc_handle_vblank_timeout,
};

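/*
 * vkms_crtc_init - initialize a CRTC with its primary and cursor planes
 *
 * Both the vkms_output (allocated together with the CRTC) and the composer
 * workqueue are DRM-managed, so the error paths below need no explicit
 * cleanup.
 */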
struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
				   struct drm_plane *cursor)
{
	struct vkms_output *vkms_out;
	struct drm_crtc *crtc;
	int ret;

	vkms_out = drmm_crtc_alloc_with_planes(dev, struct vkms_output, crtc,
					       primary, cursor,
					       &vkms_crtc_funcs, NULL);
	if (IS_ERR(vkms_out)) {
		DRM_DEV_ERROR(dev->dev, "Failed to init CRTC\n");
		return vkms_out;
	}

	crtc = &vkms_out->crtc;

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
	if (ret) {
		DRM_ERROR("Failed to set gamma size\n");
		return ERR_PTR(ret);
	}

	drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
	if (IS_ERR(vkms_out->composer_workq))
		return ERR_CAST(vkms_out->composer_workq);

	return vkms_out;
}