xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/export.h>
4 #include <linux/fb.h>
5 #include <linux/vmalloc.h>
6 
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fbdev_dma.h>
9 #include <drm/drm_fb_dma_helper.h>
10 #include <drm/drm_fb_helper.h>
11 #include <drm/drm_framebuffer.h>
12 #include <drm/drm_gem_dma_helper.h>
13 
14 /*
15  * struct fb_ops
16  */
17 
18 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19 {
20 	struct drm_fb_helper *fb_helper = info->par;
21 
22 	/* No need to take a ref for fbcon because it unbinds on unregister */
23 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 		return -ENODEV;
25 
26 	return 0;
27 }
28 
29 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30 {
31 	struct drm_fb_helper *fb_helper = info->par;
32 
33 	if (user)
34 		module_put(fb_helper->dev->driver->fops->owner);
35 
36 	return 0;
37 }
38 
39 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
40 {
41 	struct drm_fb_helper *fb_helper = info->par;
42 
43 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
44 }
45 
/* fb_ops.fb_destroy: final teardown when the fb_info is released */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* Nothing was set up if the helper has no device. */
	if (!fb_helper->dev)
		return;

	/* Stop deferred I/O before the backing memory disappears. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Undo probe: unmap the buffer, drop the framebuffer, release the client. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
}
61 
/* fbdev ops for DMA-backed framebuffers accessed directly (no shadow buffer) */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
72 
/*
 * Generate deferred-I/O read/write/draw ops (drm_fbdev_dma_shadowed_*)
 * that report damage through the given range/area helpers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
76 
/* fb_ops.fb_destroy for the shadow-buffered variant */
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	/* screen_buffer holds the vzalloc'ed shadow, not the DMA buffer. */
	void *shadow = info->screen_buffer;

	/* Nothing was set up if the helper has no device. */
	if (!fb_helper->dev)
		return;

	/* Stop deferred I/O before freeing the shadow it writes to. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);
	vfree(shadow);

	/* Undo probe: unmap the buffer, drop the framebuffer, release the client. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
}
94 
/* fbdev ops for the shadow-buffered path (deferred I/O tracks damage) */
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
103 
104 /*
105  * struct drm_fb_helper
106  */
107 
108 static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
109 					   struct drm_clip_rect *clip,
110 					   struct iosys_map *dst)
111 {
112 	struct drm_framebuffer *fb = fb_helper->fb;
113 	size_t offset = clip->y1 * fb->pitches[0];
114 	size_t len = clip->x2 - clip->x1;
115 	unsigned int y;
116 	void *src;
117 
118 	switch (drm_format_info_bpp(fb->format, 0)) {
119 	case 1:
120 		offset += clip->x1 / 8;
121 		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
122 		break;
123 	case 2:
124 		offset += clip->x1 / 4;
125 		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
126 		break;
127 	case 4:
128 		offset += clip->x1 / 2;
129 		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
130 		break;
131 	default:
132 		offset += clip->x1 * fb->format->cpp[0];
133 		len *= fb->format->cpp[0];
134 		break;
135 	}
136 
137 	src = fb_helper->info->screen_buffer + offset;
138 	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
139 
140 	for (y = clip->y1; y < clip->y2; y++) {
141 		iosys_map_memcpy_to(dst, 0, src, len);
142 		iosys_map_incr(dst, fb->pitches[0]);
143 		src += fb->pitches[0];
144 	}
145 }
146 
147 static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
148 				     struct drm_clip_rect *clip)
149 {
150 	struct drm_client_buffer *buffer = fb_helper->buffer;
151 	struct iosys_map dst;
152 
153 	/*
154 	 * For fbdev emulation, we only have to protect against fbdev modeset
155 	 * operations. Nothing else will involve the client buffer's BO. So it
156 	 * is sufficient to acquire struct drm_fb_helper.lock here.
157 	 */
158 	mutex_lock(&fb_helper->lock);
159 
160 	dst = buffer->map;
161 	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
162 
163 	mutex_unlock(&fb_helper->lock);
164 
165 	return 0;
166 }
167 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
168 					 struct drm_clip_rect *clip)
169 {
170 	struct drm_device *dev = helper->dev;
171 	int ret;
172 
173 	/* Call damage handlers only if necessary */
174 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
175 		return 0;
176 
177 	if (helper->fb->funcs->dirty) {
178 		ret = drm_fbdev_dma_damage_blit(helper, clip);
179 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
180 			return ret;
181 
182 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
183 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
184 			return ret;
185 	}
186 
187 	return 0;
188 }
189 
/* fb-helper callbacks; fb_dirty propagates fbdev damage to the driver */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
193 
/*
 * Fbdev probe
 */
197 
/* Finish probing for direct (non-shadowed) access to the DMA buffer. */
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
						 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
	struct drm_framebuffer *fb = fb_helper->fb;
	struct fb_info *info = fb_helper->info;
	struct iosys_map map = buffer->map;

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	/*
	 * Export a physical address only for directly-mapped (non-vmalloc)
	 * memory, and only if the driver did not ask to hide smem_start.
	 */
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	return 0;
}
224 
/* Finish probing with a shadow buffer and deferred I/O (for dirty-tracking FBs). */
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
							  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct fb_info *info = fb_helper->info;
	size_t screen_size = buffer->gem->size;
	void *screen_buffer;
	int ret;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus create
	 * a shadow buffer in system memory.
	 */
	screen_buffer = vzalloc(screen_size);
	if (!screen_buffer)
		return -ENOMEM;

	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_buffer = screen_buffer;
	info->fix.smem_len = screen_size;

	/* Flush accumulated damage at most 20 times per second. */
	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_vfree;

	return 0;

err_vfree:
	vfree(screen_buffer);
	return ret;
}
265 
/*
 * drm_client_funcs.fbdev_probe implementation for DMA-backed drivers:
 * creates the client framebuffer, maps it, and sets up the fb_info,
 * choosing the shadowed path when the framebuffer has a dirty callback.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released in reverse order.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	/* Pick a pixel format from the legacy bpp/depth pair. */
	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* Dirty-tracking framebuffers need the shadow-buffer path. */
	if (fb->funcs->dirty)
		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
	else
		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* Clear the helper's references before tearing the buffer down. */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
331