xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/export.h>
4 #include <linux/fb.h>
5 #include <linux/vmalloc.h>
6 
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fbdev_dma.h>
9 #include <drm/drm_fb_dma_helper.h>
10 #include <drm/drm_fb_helper.h>
11 #include <drm/drm_framebuffer.h>
12 #include <drm/drm_gem_dma_helper.h>
13 #include <drm/drm_print.h>
14 
15 /*
16  * struct fb_ops
17  */
18 
19 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
20 {
21 	struct drm_fb_helper *fb_helper = info->par;
22 
23 	/* No need to take a ref for fbcon because it unbinds on unregister */
24 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
25 		return -ENODEV;
26 
27 	return 0;
28 }
29 
30 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
31 {
32 	struct drm_fb_helper *fb_helper = info->par;
33 
34 	if (user)
35 		module_put(fb_helper->dev->driver->fops->owner);
36 
37 	return 0;
38 }
39 
40 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
41 {
42 	struct drm_fb_helper *fb_helper = info->par;
43 
44 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
45 }
46 
/* Final teardown, called when the last reference to the fb_info is dropped. */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* Never fully initialized against a device; nothing to tear down. */
	if (!fb_helper->dev)
		return;

	/* Stop deferred I/O before the framebuffer memory goes away. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Unmap and free the client buffer, then release the DRM client. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_buffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
}
62 
/*
 * fb_ops for directly mapped framebuffers (no dirty callback, no shadow
 * buffer). NOTE: the macros below expand to designated initializers; their
 * ordering matters because later initializers override earlier ones.
 */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
73 
/*
 * Generate the deferred-I/O read/write/draw fb_ops helpers (prefixed
 * drm_fbdev_dma_shadowed_*) that record damage via the two DRM helpers;
 * they are plugged in below through FB_DEFAULT_DEFERRED_OPS().
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
77 
/* Final teardown for the shadow-buffered variant; also frees the shadow. */
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	/* Grab the shadow pointer now; helpers below may touch info. */
	void *shadow = info->screen_buffer;

	/* Never fully initialized against a device; nothing to tear down. */
	if (!fb_helper->dev)
		return;

	/* Stop deferred I/O before freeing the shadow it flushes from. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);
	vfree(shadow);

	/* Unmap and free the client buffer, then release the DRM client. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_buffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
}
95 
/*
 * fb_ops for shadow-buffered framebuffers: all drawing goes through the
 * generated deferred-I/O helpers so damage is flushed to the DMA buffer.
 * No .fb_mmap override here; userspace mmaps the shadow via deferred I/O.
 */
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
104 
105 /*
106  * struct drm_fb_helper
107  */
108 
/*
 * Copy the clip rectangle from the fbdev shadow buffer
 * (info->screen_buffer) to the client buffer's mapping. @dst is a local
 * copy of the mapping and is advanced in place while blitting.
 */
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
					   struct drm_clip_rect *clip,
					   struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0]; /* first damaged scanline */
	size_t len = clip->x2 - clip->x1; /* clip width, in pixels for now */
	unsigned int y;
	void *src;

	/*
	 * Convert the horizontal extent from pixels to bytes. For sub-byte
	 * formats the clip is widened to whole bytes: the offset rounds
	 * down to the byte containing x1 and the length rounds up to cover
	 * the byte containing x2 - 1.
	 */
	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		/* 1 bpp: 8 pixels per byte */
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		/* 2 bpp: 4 pixels per byte */
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		/* 4 bpp: 2 pixels per byte */
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		/* whole-byte pixels of cpp[0] bytes each */
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	/* Copy line by line; shadow and destination share the same pitch. */
	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}
147 
148 static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
149 				     struct drm_clip_rect *clip)
150 {
151 	struct drm_client_buffer *buffer = fb_helper->buffer;
152 	struct iosys_map dst;
153 
154 	/*
155 	 * For fbdev emulation, we only have to protect against fbdev modeset
156 	 * operations. Nothing else will involve the client buffer's BO. So it
157 	 * is sufficient to acquire struct drm_fb_helper.lock here.
158 	 */
159 	mutex_lock(&fb_helper->lock);
160 
161 	dst = buffer->map;
162 	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
163 
164 	mutex_unlock(&fb_helper->lock);
165 
166 	return 0;
167 }
168 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
169 					 struct drm_clip_rect *clip)
170 {
171 	struct drm_device *dev = helper->dev;
172 	int ret;
173 
174 	/* Call damage handlers only if necessary */
175 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
176 		return 0;
177 
178 	if (helper->fb->funcs->dirty) {
179 		ret = drm_fbdev_dma_damage_blit(helper, clip);
180 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
181 			return ret;
182 
183 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
184 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
185 			return ret;
186 	}
187 
188 	return 0;
189 }
190 
191 static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
192 	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
193 };
194 
/*
 * Fbdev probing
 */
198 
/*
 * Finish fbdev setup for framebuffers without a dirty callback: fbdev
 * clients render directly into the vmap'ed DMA buffer, no shadow copy.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
						 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
	struct drm_framebuffer *fb = fb_helper->fb;
	struct fb_info *info = fb_helper->info;
	struct iosys_map map = buffer->map;

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	/*
	 * Export a physical address only if the driver did not ask to hide
	 * it. virt_to_page() is not valid for vmalloc addresses, hence the
	 * WARN guard; smem_start is left untouched in that case.
	 */
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	return 0;
}
225 
/*
 * Finish fbdev setup for framebuffers with a dirty callback: fbdev clients
 * render into a vmalloc'ed shadow buffer, which deferred I/O flushes to
 * the DMA buffer via drm_fbdev_dma_helper_fb_dirty().
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
							  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct fb_info *info = fb_helper->info;
	size_t screen_size = buffer->gem->size;
	void *screen_buffer;
	int ret;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus create
	 * a shadow buffer in system memory.
	 */
	screen_buffer = vzalloc(screen_size);
	if (!screen_buffer)
		return -ENOMEM;

	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_buffer = screen_buffer;
	info->fix.smem_len = screen_size;

	/* Flush accumulated damage at most every 50 ms. */
	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_vfree;

	return 0;

err_vfree:
	/* The shadow is only freed here; the caller unwinds the rest. */
	vfree(screen_buffer);
	return ret;
}
266 
/**
 * drm_fbdev_dma_driver_fbdev_probe - Create an fbdev framebuffer on DMA memory
 * @fb_helper: fbdev helper structure
 * @sizes: requested surface geometry
 *
 * Allocates a dumb client buffer, maps it into kernel address space and
 * fills in the fbdev info. Depending on whether the framebuffer has a
 * dirty callback, either the direct or the shadow-buffered fb_ops are
 * installed.
 *
 * Returns: 0 on success, or a negative errno code otherwise.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* A dirty callback implies the DMA buffer needs explicit flushing. */
	if (fb->funcs->dirty)
		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
	else
		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* Clear the helper's references before destroying the buffer. */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_buffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
332