xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision 76544811c850a1f4c055aa182b513b7a843868ea)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/fb.h>
4 #include <linux/vmalloc.h>
5 
6 #include <drm/drm_drv.h>
7 #include <drm/drm_fbdev_dma.h>
8 #include <drm/drm_fb_dma_helper.h>
9 #include <drm/drm_fb_helper.h>
10 #include <drm/drm_framebuffer.h>
11 #include <drm/drm_gem_dma_helper.h>
12 
13 /*
14  * struct fb_ops
15  */
16 
drm_fbdev_dma_fb_open(struct fb_info * info,int user)17 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
18 {
19 	struct drm_fb_helper *fb_helper = info->par;
20 
21 	/* No need to take a ref for fbcon because it unbinds on unregister */
22 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
23 		return -ENODEV;
24 
25 	return 0;
26 }
27 
drm_fbdev_dma_fb_release(struct fb_info * info,int user)28 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
29 {
30 	struct drm_fb_helper *fb_helper = info->par;
31 
32 	if (user)
33 		module_put(fb_helper->dev->driver->fops->owner);
34 
35 	return 0;
36 }
37 
drm_fbdev_dma_fb_mmap(struct fb_info * info,struct vm_area_struct * vma)38 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
39 {
40 	struct drm_fb_helper *fb_helper = info->par;
41 
42 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
43 }
44 
drm_fbdev_dma_fb_destroy(struct fb_info * info)45 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
46 {
47 	struct drm_fb_helper *fb_helper = info->par;
48 
49 	if (!fb_helper->dev)
50 		return;
51 
52 	if (info->fbdefio)
53 		fb_deferred_io_cleanup(info);
54 	drm_fb_helper_fini(fb_helper);
55 
56 	drm_client_buffer_vunmap(fb_helper->buffer);
57 	drm_client_framebuffer_delete(fb_helper->buffer);
58 	drm_client_release(&fb_helper->client);
59 	drm_fb_helper_unprepare(fb_helper);
60 	kfree(fb_helper);
61 }
62 
63 static const struct fb_ops drm_fbdev_dma_fb_ops = {
64 	.owner = THIS_MODULE,
65 	.fb_open = drm_fbdev_dma_fb_open,
66 	.fb_release = drm_fbdev_dma_fb_release,
67 	__FB_DEFAULT_DMAMEM_OPS_RDWR,
68 	DRM_FB_HELPER_DEFAULT_OPS,
69 	__FB_DEFAULT_DMAMEM_OPS_DRAW,
70 	.fb_mmap = drm_fbdev_dma_fb_mmap,
71 	.fb_destroy = drm_fbdev_dma_fb_destroy,
72 };
73 
/*
 * Generate deferred-I/O read/write and drawing ops for the shadowed
 * framebuffer; damage is reported to the fb-helper damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
77 
drm_fbdev_dma_shadowed_fb_destroy(struct fb_info * info)78 static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
79 {
80 	struct drm_fb_helper *fb_helper = info->par;
81 	void *shadow = info->screen_buffer;
82 
83 	if (!fb_helper->dev)
84 		return;
85 
86 	if (info->fbdefio)
87 		fb_deferred_io_cleanup(info);
88 	drm_fb_helper_fini(fb_helper);
89 	vfree(shadow);
90 
91 	drm_client_buffer_vunmap(fb_helper->buffer);
92 	drm_client_framebuffer_delete(fb_helper->buffer);
93 	drm_client_release(&fb_helper->client);
94 	drm_fb_helper_unprepare(fb_helper);
95 	kfree(fb_helper);
96 }
97 
98 static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
99 	.owner = THIS_MODULE,
100 	.fb_open = drm_fbdev_dma_fb_open,
101 	.fb_release = drm_fbdev_dma_fb_release,
102 	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
103 	DRM_FB_HELPER_DEFAULT_OPS,
104 	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
105 };
106 
107 /*
108  * struct drm_fb_helper
109  */
110 
drm_fbdev_dma_damage_blit_real(struct drm_fb_helper * fb_helper,struct drm_clip_rect * clip,struct iosys_map * dst)111 static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
112 					   struct drm_clip_rect *clip,
113 					   struct iosys_map *dst)
114 {
115 	struct drm_framebuffer *fb = fb_helper->fb;
116 	size_t offset = clip->y1 * fb->pitches[0];
117 	size_t len = clip->x2 - clip->x1;
118 	unsigned int y;
119 	void *src;
120 
121 	switch (drm_format_info_bpp(fb->format, 0)) {
122 	case 1:
123 		offset += clip->x1 / 8;
124 		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
125 		break;
126 	case 2:
127 		offset += clip->x1 / 4;
128 		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
129 		break;
130 	case 4:
131 		offset += clip->x1 / 2;
132 		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
133 		break;
134 	default:
135 		offset += clip->x1 * fb->format->cpp[0];
136 		len *= fb->format->cpp[0];
137 		break;
138 	}
139 
140 	src = fb_helper->info->screen_buffer + offset;
141 	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
142 
143 	for (y = clip->y1; y < clip->y2; y++) {
144 		iosys_map_memcpy_to(dst, 0, src, len);
145 		iosys_map_incr(dst, fb->pitches[0]);
146 		src += fb->pitches[0];
147 	}
148 }
149 
drm_fbdev_dma_damage_blit(struct drm_fb_helper * fb_helper,struct drm_clip_rect * clip)150 static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
151 				     struct drm_clip_rect *clip)
152 {
153 	struct drm_client_buffer *buffer = fb_helper->buffer;
154 	struct iosys_map dst;
155 
156 	/*
157 	 * For fbdev emulation, we only have to protect against fbdev modeset
158 	 * operations. Nothing else will involve the client buffer's BO. So it
159 	 * is sufficient to acquire struct drm_fb_helper.lock here.
160 	 */
161 	mutex_lock(&fb_helper->lock);
162 
163 	dst = buffer->map;
164 	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
165 
166 	mutex_unlock(&fb_helper->lock);
167 
168 	return 0;
169 }
drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper * helper,struct drm_clip_rect * clip)170 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
171 					 struct drm_clip_rect *clip)
172 {
173 	struct drm_device *dev = helper->dev;
174 	int ret;
175 
176 	/* Call damage handlers only if necessary */
177 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
178 		return 0;
179 
180 	if (helper->fb->funcs->dirty) {
181 		ret = drm_fbdev_dma_damage_blit(helper, clip);
182 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
183 			return ret;
184 
185 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
186 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
187 			return ret;
188 	}
189 
190 	return 0;
191 }
192 
193 static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
194 	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
195 };
196 
197 /*
198  * struct drm_fb_helper
199  */
200 
drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper * fb_helper,struct drm_fb_helper_surface_size * sizes)201 static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
202 						 struct drm_fb_helper_surface_size *sizes)
203 {
204 	struct drm_device *dev = fb_helper->dev;
205 	struct drm_client_buffer *buffer = fb_helper->buffer;
206 	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
207 	struct drm_framebuffer *fb = fb_helper->fb;
208 	struct fb_info *info = fb_helper->info;
209 	struct iosys_map map = buffer->map;
210 
211 	info->fbops = &drm_fbdev_dma_fb_ops;
212 
213 	/* screen */
214 	info->flags |= FBINFO_VIRTFB; /* system memory */
215 	if (dma_obj->map_noncoherent)
216 		info->flags |= FBINFO_READS_FAST; /* signal caching */
217 	info->screen_size = sizes->surface_height * fb->pitches[0];
218 	info->screen_buffer = map.vaddr;
219 	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
220 		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
221 			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
222 	}
223 	info->fix.smem_len = info->screen_size;
224 
225 	return 0;
226 }
227 
drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper * fb_helper,struct drm_fb_helper_surface_size * sizes)228 static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
229 							  struct drm_fb_helper_surface_size *sizes)
230 {
231 	struct drm_client_buffer *buffer = fb_helper->buffer;
232 	struct fb_info *info = fb_helper->info;
233 	size_t screen_size = buffer->gem->size;
234 	void *screen_buffer;
235 	int ret;
236 
237 	/*
238 	 * Deferred I/O requires struct page for framebuffer memory,
239 	 * which is not guaranteed for all DMA ranges. We thus create
240 	 * a shadow buffer in system memory.
241 	 */
242 	screen_buffer = vzalloc(screen_size);
243 	if (!screen_buffer)
244 		return -ENOMEM;
245 
246 	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
247 
248 	/* screen */
249 	info->flags |= FBINFO_VIRTFB; /* system memory */
250 	info->flags |= FBINFO_READS_FAST; /* signal caching */
251 	info->screen_buffer = screen_buffer;
252 	info->fix.smem_len = screen_size;
253 
254 	fb_helper->fbdefio.delay = HZ / 20;
255 	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
256 
257 	info->fbdefio = &fb_helper->fbdefio;
258 	ret = fb_deferred_io_init(info);
259 	if (ret)
260 		goto err_vfree;
261 
262 	return 0;
263 
264 err_vfree:
265 	vfree(screen_buffer);
266 	return ret;
267 }
268 
drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper * fb_helper,struct drm_fb_helper_surface_size * sizes)269 int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
270 				     struct drm_fb_helper_surface_size *sizes)
271 {
272 	struct drm_client_dev *client = &fb_helper->client;
273 	struct drm_device *dev = fb_helper->dev;
274 	struct drm_client_buffer *buffer;
275 	struct drm_framebuffer *fb;
276 	struct fb_info *info;
277 	u32 format;
278 	struct iosys_map map;
279 	int ret;
280 
281 	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
282 		    sizes->surface_width, sizes->surface_height,
283 		    sizes->surface_bpp);
284 
285 	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
286 					     sizes->surface_depth);
287 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
288 					       sizes->surface_height, format);
289 	if (IS_ERR(buffer))
290 		return PTR_ERR(buffer);
291 
292 	fb = buffer->fb;
293 
294 	ret = drm_client_buffer_vmap(buffer, &map);
295 	if (ret) {
296 		goto err_drm_client_buffer_delete;
297 	} else if (drm_WARN_ON(dev, map.is_iomem)) {
298 		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
299 		goto err_drm_client_buffer_delete;
300 	}
301 
302 	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
303 	fb_helper->buffer = buffer;
304 	fb_helper->fb = fb;
305 
306 	info = drm_fb_helper_alloc_info(fb_helper);
307 	if (IS_ERR(info)) {
308 		ret = PTR_ERR(info);
309 		goto err_drm_client_buffer_vunmap;
310 	}
311 
312 	drm_fb_helper_fill_info(info, fb_helper, sizes);
313 
314 	if (fb->funcs->dirty)
315 		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
316 	else
317 		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
318 	if (ret)
319 		goto err_drm_fb_helper_release_info;
320 
321 	return 0;
322 
323 err_drm_fb_helper_release_info:
324 	drm_fb_helper_release_info(fb_helper);
325 err_drm_client_buffer_vunmap:
326 	fb_helper->fb = NULL;
327 	fb_helper->buffer = NULL;
328 	drm_client_buffer_vunmap(buffer);
329 err_drm_client_buffer_delete:
330 	drm_client_framebuffer_delete(buffer);
331 	return ret;
332 }
333 EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
334