/* xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision b4ada0618eed0fbd1b1630f73deb048c592b06a1) */
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/export.h>
4 #include <linux/fb.h>
5 #include <linux/vmalloc.h>
6 
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fbdev_dma.h>
9 #include <drm/drm_fb_dma_helper.h>
10 #include <drm/drm_fb_helper.h>
11 #include <drm/drm_framebuffer.h>
12 #include <drm/drm_gem_dma_helper.h>
13 
14 /*
15  * struct fb_ops
16  */
17 
18 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19 {
20 	struct drm_fb_helper *fb_helper = info->par;
21 
22 	/* No need to take a ref for fbcon because it unbinds on unregister */
23 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 		return -ENODEV;
25 
26 	return 0;
27 }
28 
29 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30 {
31 	struct drm_fb_helper *fb_helper = info->par;
32 
33 	if (user)
34 		module_put(fb_helper->dev->driver->fops->owner);
35 
36 	return 0;
37 }
38 
39 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
40 {
41 	struct drm_fb_helper *fb_helper = info->par;
42 
43 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
44 }
45 
/*
 * Final teardown of the fbdev device; runs when the last reference
 * to the fb_info is dropped. Releases the deferred-I/O state, the
 * fb helper, the client buffer mapping and the helper itself.
 */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No device attached — presumably never fully set up; nothing to do. */
	if (!fb_helper->dev)
		return;

	/* Deferred I/O, if enabled, must be cleaned up before the helper. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Undo the setup performed in drm_fbdev_dma_driver_fbdev_probe(). */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
63 
/* fb_ops for directly mapped (non-shadowed) DMA framebuffers. */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,	/* read/write on DMA-able memory */
	DRM_FB_HELPER_DEFAULT_OPS,	/* check_var/set_par/etc. helpers */
	__FB_DEFAULT_DMAMEM_OPS_DRAW,	/* fillrect/copyarea/imageblit */
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
74 
/*
 * Generates the deferred-I/O read/write/draw fb ops with the
 * drm_fbdev_dma_shadowed prefix; damage is reported through the
 * given range/area callbacks. Consumed by FB_DEFAULT_DEFERRED_OPS()
 * in drm_fbdev_dma_shadowed_fb_ops.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
78 
/*
 * Final teardown for the shadow-buffered variant. Same sequence as
 * drm_fbdev_dma_fb_destroy(), plus freeing the vmalloc'ed shadow
 * screen buffer.
 */
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	/* Save the pointer; info may not be dereferenced after fini. */
	void *shadow = info->screen_buffer;

	/* No device attached — presumably never fully set up; nothing to do. */
	if (!fb_helper->dev)
		return;

	/* Deferred I/O, if enabled, must be cleaned up before the helper. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);
	vfree(shadow);

	/* Undo the setup performed in drm_fbdev_dma_driver_fbdev_probe(). */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
98 
/* fb_ops for shadow-buffered framebuffers using deferred I/O. */
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	/* read/write/draw ops generated by FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS() */
	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
107 
108 /*
109  * struct drm_fb_helper
110  */
111 
/*
 * Copy the damaged scanlines from the shadow buffer
 * (info->screen_buffer) into the destination mapping @dst.
 *
 * @fb_helper: fbdev helper whose framebuffer is being flushed
 * @clip: damaged region, in pixels
 * @dst: mapping of the destination buffer; advanced while copying
 */
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
					   struct drm_clip_rect *clip,
					   struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1; /* in pixels; scaled to bytes below */
	unsigned int y;
	void *src;

	/*
	 * Convert the horizontal pixel range to a byte offset and length.
	 * Sub-byte formats (1/2/4 bpp) round the length up to whole bytes,
	 * accounting for a clip rect that starts mid-byte.
	 */
	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		/* Whole-byte formats: scale by bytes per pixel. */
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	/* Copy line by line; source and destination share the fb pitch. */
	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}
150 
151 static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
152 				     struct drm_clip_rect *clip)
153 {
154 	struct drm_client_buffer *buffer = fb_helper->buffer;
155 	struct iosys_map dst;
156 
157 	/*
158 	 * For fbdev emulation, we only have to protect against fbdev modeset
159 	 * operations. Nothing else will involve the client buffer's BO. So it
160 	 * is sufficient to acquire struct drm_fb_helper.lock here.
161 	 */
162 	mutex_lock(&fb_helper->lock);
163 
164 	dst = buffer->map;
165 	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
166 
167 	mutex_unlock(&fb_helper->lock);
168 
169 	return 0;
170 }
171 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
172 					 struct drm_clip_rect *clip)
173 {
174 	struct drm_device *dev = helper->dev;
175 	int ret;
176 
177 	/* Call damage handlers only if necessary */
178 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
179 		return 0;
180 
181 	if (helper->fb->funcs->dirty) {
182 		ret = drm_fbdev_dma_damage_blit(helper, clip);
183 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
184 			return ret;
185 
186 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
187 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
188 			return ret;
189 	}
190 
191 	return 0;
192 }
193 
/* fb_dirty flushes damaged regions to the device. */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
197 
/*
 * struct drm_driver
 */
201 
/*
 * Finish probing for the direct-mapped case: point the fb_info's
 * screen at the vmapped client buffer. Used when the framebuffer
 * has no dirty callback and thus needs no shadow buffer.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
						 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
	struct drm_framebuffer *fb = fb_helper->fb;
	struct fb_info *info = fb_helper->info;
	struct iosys_map map = buffer->map;

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	/*
	 * Only publish a physical address when the buffer is not vmalloc
	 * memory, where page_to_phys() on the first page would be bogus.
	 */
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	return 0;
}
228 
/*
 * Finish probing for the shadow-buffered case: allocate a vmalloc'ed
 * shadow screen buffer and enable deferred I/O so damage gets flushed
 * to the real buffer via the fb_dirty helper.
 *
 * Returns 0 on success, or -ENOMEM / the fb_deferred_io_init() error.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
							  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct fb_info *info = fb_helper->info;
	size_t screen_size = buffer->gem->size;
	void *screen_buffer;
	int ret;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus create
	 * a shadow buffer in system memory.
	 */
	screen_buffer = vzalloc(screen_size);
	if (!screen_buffer)
		return -ENOMEM;

	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_buffer = screen_buffer;
	info->fix.smem_len = screen_size;

	/* Flush accumulated damage at most every 50 ms. */
	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_vfree;

	return 0;

err_vfree:
	vfree(screen_buffer);
	return ret;
}
269 
/*
 * drm_fbdev_dma_driver_fbdev_probe - Implements struct drm_driver.fbdev_probe
 * @fb_helper: fbdev helper to set up
 * @sizes: requested surface geometry
 *
 * Creates a DMA-backed client framebuffer, vmaps it and initializes
 * the fb_info. Framebuffers with a dirty callback get a shadow buffer
 * with deferred I/O; all others map the buffer directly.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* Dirty callbacks require a shadow buffer for deferred I/O. */
	if (fb->funcs->dirty)
		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
	else
		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* Clear the helper's references before destroying the buffer. */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
335