xref: /linux/drivers/gpu/drm/vkms/vkms_composer.c (revision ec8a42e7343234802b9054874fe01810880289ce)
// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
				 const struct vkms_composer *composer)
{
	u32 pixel;
	int src_offset = composer->offset + (y * composer->pitch)
				      + (x * composer->cpp);

	pixel = *(u32 *)&buffer[src_offset];

	return pixel;
}
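
/*
 * Illustrative note (added, not part of the original source): for a packed
 * 32-bit format such as XRGB8888, ->cpp is 4 and ->pitch is the line stride
 * in bytes, so the pixel at (x, y) starts at offset + y * pitch + x * 4.
 * As a hypothetical example, with offset 0 and a pitch of 4096 bytes, the
 * pixel (3, 2) begins at 2 * 4096 + 3 * 4 = 8204.
 */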

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns the CRC value computed using crc32 on the visible portion of
 * the final framebuffer at vaddr.
 */
static uint32_t compute_crc(const u8 *vaddr,
			    const struct vkms_composer *composer)
{
	int x, y;
	u32 crc = 0, pixel = 0;
	int x_src = composer->src.x1 >> 16;
	int y_src = composer->src.y1 >> 16;
	int h_src = drm_rect_height(&composer->src) >> 16;
	int w_src = drm_rect_width(&composer->src) >> 16;

	for (y = y_src; y < y_src + h_src; ++y) {
		for (x = x_src; x < x_src + w_src; ++x) {
			pixel = get_pixel_from_buffer(x, y, vaddr, composer);
			crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
		}
	}

	return crc;
}
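
/*
 * Note (added for clarity): the source rectangle of a DRM plane state is
 * expressed in 16.16 fixed-point plane coordinates, which is why the
 * coordinates and dimensions above are shifted right by 16 to obtain whole
 * pixels. For instance, a src.x1 of 0x280000 corresponds to pixel 40.
 */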

static u8 blend_channel(u8 src, u8 dst, u8 alpha)
{
	u32 pre_blend;
	u8 new_color;

	pre_blend = (src * 255 + dst * (255 - alpha));

	/* Faster div by 255 */
	new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);

	return new_color;
}
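
/*
 * Explanatory note (added, not in the original source): the expression above
 * approximates pre_blend / 255 using only adds and shifts, avoiding an
 * integer division per channel. For example, with pre_blend = 510 it yields
 * (510 + ((510 + 257) >> 8)) >> 8 = (510 + 2) >> 8 = 2, matching 510 / 255.
 */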

static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
{
	u8 alpha;

	alpha = argb_src[3];
	argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
	argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
	argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
	/* Opaque primary */
	argb_dst[3] = 0xFF;
}
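
/*
 * Added note: because the source pixels are treated as pre-multiplied, each
 * colour channel is effectively computed as
 * dst = src + dst * (255 - alpha) / 255. A fully transparent source
 * (alpha == 0, so its pre-multiplied channels are 0) leaves the destination
 * unchanged, while a fully opaque one (alpha == 255) replaces it.
 */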

/**
 * blend - blend value at vaddr_src with value at vaddr_dst
 * @vaddr_dst: destination address
 * @vaddr_src: source address
 * @dst_composer: destination framebuffer's metadata
 * @src_composer: source framebuffer's metadata
 *
 * Blend the vaddr_src value with the vaddr_dst value using the pre-multiplied
 * alpha blending equation, since DRM currently assumes that the pixel color
 * values have already been pre-multiplied with the alpha channel values. See
 * drm_plane_create_blend_mode_property() for more details. This function uses
 * the buffers' metadata to locate the new composite values at vaddr_dst.
 */
static void blend(void *vaddr_dst, void *vaddr_src,
		  struct vkms_composer *dst_composer,
		  struct vkms_composer *src_composer)
{
	int i, j, j_dst, i_dst;
	int offset_src, offset_dst;
	u8 *pixel_dst, *pixel_src;

	int x_src = src_composer->src.x1 >> 16;
	int y_src = src_composer->src.y1 >> 16;

	int x_dst = src_composer->dst.x1;
	int y_dst = src_composer->dst.y1;
	int h_dst = drm_rect_height(&src_composer->dst);
	int w_dst = drm_rect_width(&src_composer->dst);

	int y_limit = y_src + h_dst;
	int x_limit = x_src + w_dst;

	for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
		for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
			offset_dst = dst_composer->offset
				     + (i_dst * dst_composer->pitch)
				     + (j_dst++ * dst_composer->cpp);
			offset_src = src_composer->offset
				     + (i * src_composer->pitch)
				     + (j * src_composer->cpp);

			pixel_src = (u8 *)(vaddr_src + offset_src);
			pixel_dst = (u8 *)(vaddr_dst + offset_dst);
			alpha_blending(pixel_src, pixel_dst);
		}
		i_dst++;
	}
}
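
/*
 * Illustrative example (added): i/j walk the source plane in its own pixel
 * coordinates (the 16.16 fixed-point src rect shifted down to integers),
 * while i_dst/j_dst walk the destination in integer CRTC coordinates. For a
 * hypothetical 64x64 cursor whose dst rect starts at (100, 50) with src at
 * (0, 0), source pixel (0, 0) is blended onto destination pixel (100, 50),
 * (1, 0) onto (101, 50), and so on.
 */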

static void compose_cursor(struct vkms_composer *cursor_composer,
			   struct vkms_composer *primary_composer,
			   void *vaddr_out)
{
	struct drm_gem_object *cursor_obj;
	struct drm_gem_shmem_object *cursor_shmem_obj;

	cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
	cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj);

	if (WARN_ON(!cursor_shmem_obj->vaddr))
		return;

	blend(vaddr_out, cursor_shmem_obj->vaddr,
	      primary_composer, cursor_composer);
}

static int compose_planes(void **vaddr_out,
			  struct vkms_composer *primary_composer,
			  struct vkms_composer *cursor_composer)
{
	struct drm_framebuffer *fb = &primary_composer->fb;
	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
	struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);

	if (!*vaddr_out) {
		*vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
		if (!*vaddr_out) {
			DRM_ERROR("Cannot allocate memory for output frame.");
			return -ENOMEM;
		}
	}

	if (WARN_ON(!shmem_obj->vaddr))
		return -EINVAL;

	memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);

	if (cursor_composer)
		compose_cursor(cursor_composer, primary_composer, *vaddr_out);

	return 0;
}
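
/*
 * Added note: *vaddr_out is either the pre-mapped writeback buffer (when a
 * writeback job is pending, see vkms_composer_worker() below) or a temporary
 * buffer allocated here and freed by the worker once the CRC has been
 * computed. The primary plane's buffer is copied in full, and the cursor, if
 * any, is then blended on top of it.
 */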

/**
 * vkms_composer_worker - ordered work_struct to compose planes and compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing the planes and computing CRCs. The work_struct
 * runs on an ordered workqueue; it is periodically queued by _vblank_handle()
 * and flushed at vkms_atomic_crtc_destroy_state().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	struct vkms_composer *primary_composer = NULL;
	struct vkms_composer *cursor_composer = NULL;
	bool crc_pending, wb_pending;
	void *vaddr_out = NULL;
	u32 crc32 = 0;
	u64 frame_start, frame_end;
	int ret;

	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	wb_pending = crtc_state->wb_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;
	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	if (crtc_state->num_active_planes >= 1)
		primary_composer = crtc_state->active_planes[0]->composer;

	if (crtc_state->num_active_planes == 2)
		cursor_composer = crtc_state->active_planes[1]->composer;

	if (!primary_composer)
		return;

	if (wb_pending)
		vaddr_out = crtc_state->active_writeback;

	ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
	if (ret) {
		if (ret == -EINVAL && !wb_pending)
			kfree(vaddr_out);
		return;
	}

	crc32 = compute_crc(vaddr_out, primary_composer);

	if (wb_pending) {
		drm_writeback_signal_completion(&out->wb_connector, 0);
		spin_lock_irq(&out->composer_lock);
		crtc_state->wb_pending = false;
		spin_unlock_irq(&out->composer_lock);
	} else {
		kfree(vaddr_out);
	}

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
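
/*
 * Added note: each drm_crtc_add_crc_entry() call feeds one frame's CRC to the
 * DRM CRC debugfs interface, which is what igt-gpu-tools and other userspace
 * test suites read to verify the composed output.
 */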

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}
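
/*
 * Usage sketch (added; the exact debugfs path depends on the DRM minor):
 * userspace selects the CRC source by writing its name to the CRTC's
 * crc/control file and reads one CRC entry per frame from crc/data, e.g.:
 *
 *	echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *	cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * An unknown source name is rejected with -EINVAL by the parsing above, and
 * vkms_set_crc_source() below is called when capture starts and stops.
 */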

void vkms_set_composer(struct vkms_output *out, bool enabled)
{
	bool old_enabled;

	if (enabled)
		drm_crtc_vblank_get(&out->crtc);

	spin_lock_irq(&out->lock);
	old_enabled = out->composer_enabled;
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	if (old_enabled)
		drm_crtc_vblank_put(&out->crtc);
}
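
/*
 * Added note: a vblank reference is held while the composer is enabled so
 * that the simulated vblank keeps firing and queueing vkms_composer_worker();
 * the reference is dropped again when the composer is turned off.
 */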

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret = 0;

	ret = vkms_crc_parse_source(src_name, &enabled);

	vkms_set_composer(out, enabled);

	return ret;
}
318