// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_helper.h>

#include "vkms_drv.h"

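/*
 * Handles expiry of the emulated vblank timer: signals the vblank and, when
 * the composer is enabled, records the frame interval for CRC computation
 * and queues the composer worker. The dma-fence annotations mark this as a
 * fence-signalling critical section. Returns: always true.
 */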
static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
	struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
	struct vkms_crtc_state *state;
	bool ret, fence_cookie;

	fence_cookie = dma_fence_begin_signalling();

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank\n");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/*
		 * Update frame_start only if a queued vkms_composer_worker()
		 * has read the data.
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	dma_fence_end_signalling(fence_cookie);

	return true;
}

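/*
 * Allocates a new vkms_crtc_state, copies the common CRTC state into it and
 * initializes a fresh composer work item for the duplicate.
 */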
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

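/*
 * Releases a vkms_crtc_state. The composer work item is embedded in the
 * state being freed, so it must no longer be pending at this point.
 */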
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

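/*
 * Resets the CRTC to a freshly allocated vkms_crtc_state, destroying any
 * previous software state first.
 */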
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	/* Tolerate allocation failure: the helper accepts a NULL state. */
	__drm_atomic_helper_crtc_reset(crtc, vkms_state ? &vkms_state->base : NULL);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

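/*
 * DRM_CRTC_VBLANK_TIMER_FUNCS fills in the vblank enable/disable and
 * timestamp hooks with the timer-based emulation from drm_vblank_helper.h.
 */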
static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	DRM_CRTC_VBLANK_TIMER_FUNCS,
	.get_crc_sources	= vkms_get_crc_sources,
	.set_crc_source		= vkms_set_crc_source,
	.verify_crc_source	= vkms_verify_crc_source,
};

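/*
 * Builds the array of visible planes the composer will blend for this CRTC.
 * The plane mask is walked twice: once to size the array, once to fill it
 * (see the pass comments below).
 */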
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;

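	/* First pass: count the visible planes attached to this CRTC. */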
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

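	/* Second pass: record the visible plane states in the same order. */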
	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

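/*
 * vkms_crtc_atomic_begin() and vkms_crtc_atomic_flush() bracket the commit
 * tail: the output lock is taken here and released in the flush hook, as
 * the sparse annotations indicate.
 */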
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__acquires(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/*
	 * This lock is held across the atomic commit to block the vblank
	 * timer from scheduling vkms_composer_worker() until the composer
	 * state is updated.
	 */
	spin_lock_irq(&vkms_output->lock);
}

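/*
 * Sends the pageflip event immediately when no vblank reference can be
 * taken, otherwise arms it for the next vblank; then publishes the new
 * composer state and drops the lock taken in vkms_crtc_atomic_begin().
 */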
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__releases(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

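/*
 * Enable/disable map to the generic vblank helpers so the emulated vblank
 * starts and stops with the CRTC.
 */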
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= drm_crtc_vblank_atomic_enable,
	.atomic_disable	= drm_crtc_vblank_atomic_disable,
	.handle_vblank_timeout = vkms_crtc_handle_vblank_timeout,
};

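/**
 * vkms_crtc_init() - create and initialize a vkms CRTC
 * @dev: DRM device the CRTC belongs to
 * @primary: primary plane to attach to the CRTC
 * @cursor: optional cursor plane to attach to the CRTC
 *
 * Allocates the vkms_output with a DRM-managed CRTC, wires up the helper
 * vtable, gamma/color management and the ordered workqueue used by the
 * composer.
 *
 * Returns: the new vkms_output on success, or an ERR_PTR() on failure.
 */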
struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
				   struct drm_plane *cursor)
{
	struct vkms_output *vkms_out;
	struct drm_crtc *crtc;
	int ret;

	vkms_out = drmm_crtc_alloc_with_planes(dev, struct vkms_output, crtc,
					       primary, cursor,
					       &vkms_crtc_funcs, NULL);
	if (IS_ERR(vkms_out)) {
		DRM_DEV_ERROR(dev->dev, "Failed to init CRTC\n");
		return vkms_out;
	}

	crtc = &vkms_out->crtc;

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
	if (ret) {
		DRM_ERROR("Failed to set gamma size\n");
		return ERR_PTR(ret);
	}

	drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
	if (IS_ERR(vkms_out->composer_workq))
		return ERR_CAST(vkms_out->composer_workq);

	return vkms_out;
}
239