xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision b9a14d54ab2bf0c09409f373a2120de65046178a)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/fb.h>
4 
5 #include <drm/drm_crtc_helper.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_fb_dma_helper.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem_dma_helper.h>
11 
12 #include <drm/drm_fbdev_dma.h>
13 
14 /*
15  * struct fb_ops
16  */
17 
18 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19 {
20 	struct drm_fb_helper *fb_helper = info->par;
21 
22 	/* No need to take a ref for fbcon because it unbinds on unregister */
23 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 		return -ENODEV;
25 
26 	return 0;
27 }
28 
29 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30 {
31 	struct drm_fb_helper *fb_helper = info->par;
32 
33 	if (user)
34 		module_put(fb_helper->dev->driver->fops->owner);
35 
36 	return 0;
37 }
38 
39 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
40 {
41 	struct drm_fb_helper *fb_helper = info->par;
42 
43 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
44 }
45 
46 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
47 {
48 	struct drm_fb_helper *fb_helper = info->par;
49 
50 	if (!fb_helper->dev)
51 		return;
52 
53 	fb_deferred_io_cleanup(info);
54 	drm_fb_helper_fini(fb_helper);
55 
56 	drm_client_buffer_vunmap(fb_helper->buffer);
57 	drm_client_framebuffer_delete(fb_helper->buffer);
58 	drm_client_release(&fb_helper->client);
59 	drm_fb_helper_unprepare(fb_helper);
60 	kfree(fb_helper);
61 }
62 
/*
 * I/O callbacks for framebuffers without deferred I/O. The
 * __FB_DEFAULT_DMAMEM_OPS_* macros fill in the read/write and
 * drawing callbacks for DMA-able system memory;
 * DRM_FB_HELPER_DEFAULT_OPS supplies the modesetting-related
 * callbacks.
 */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
73 
/*
 * Generates the deferred-I/O read/write and drawing wrappers used by
 * drm_fbdev_dma_deferred_fb_ops below. Damage is reported through the
 * drm_fb_helper_damage_range()/drm_fb_helper_damage_area() callbacks.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
77 
78 static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
79 {
80 	struct drm_fb_helper *fb_helper = info->par;
81 	struct drm_framebuffer *fb = fb_helper->fb;
82 	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
83 
84 	if (!dma->map_noncoherent)
85 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
86 
87 	return fb_deferred_io_mmap(info, vma);
88 }
89 
/*
 * I/O callbacks for framebuffers with deferred I/O. Reads, writes and
 * drawing go through the wrappers generated by
 * FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS() above, so that framebuffer
 * writes are tracked and flushed as damage.
 */
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
100 
101 /*
102  * struct drm_fb_helper
103  */
104 
/* Legacy .fb_probe callback; forwards to the exported probe function below. */
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
					 struct drm_fb_helper_surface_size *sizes)
{
	return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
}
110 
111 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
112 					 struct drm_clip_rect *clip)
113 {
114 	struct drm_device *dev = helper->dev;
115 	int ret;
116 
117 	/* Call damage handlers only if necessary */
118 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
119 		return 0;
120 
121 	if (helper->fb->funcs->dirty) {
122 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
123 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
124 			return ret;
125 	}
126 
127 	return 0;
128 }
129 
/* Helper callbacks driving the fbdev emulation */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
134 
/*
 * Fbdev probe
 */
138 
/*
 * Creates a DMA-backed framebuffer of the requested size and sets up
 * the fbdev emulation on top of it: allocates the client buffer, maps
 * it into the kernel and fills in struct fb_info. Deferred I/O is
 * only installed when the framebuffer has a damage handler and the
 * screen buffer has struct page backing.
 *
 * Exported so drivers can use it for their fbdev probe callback.
 * Returns 0 on success, or a negative errno code otherwise.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	bool use_deferred_io = false;
	struct drm_client_buffer *buffer;
	struct drm_gem_dma_object *dma_obj;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	/* Translate the legacy bpp/depth request into a format code */
	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	dma_obj = to_drm_gem_dma_obj(buffer->gem);

	fb = buffer->fb;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus only
	 * install deferred I/O if we have a framebuffer that requires
	 * it.
	 */
	if (fb->funcs->dirty)
		use_deferred_io = true;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* The deferred and non-deferred paths use different fb_ops tables */
	if (use_deferred_io)
		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
	else
		info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		/* Only linear (non-vmalloc) buffers have a usable physical address */
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	/*
	 * Only set up deferred I/O if the screen buffer supports
	 * it. If this disagrees with the previous test for ->dirty,
	 * mmap on the /dev/fb file might not work correctly.
	 */
	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;

		/*
		 * NOTE(review): this assumes pfn_to_page() returns NULL for
		 * pageless ranges -- confirm this holds on the memory models
		 * this driver targets.
		 */
		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
			use_deferred_io = false;
	}

	/* deferred I/O */
	if (use_deferred_io) {
		fb_helper->fbdefio.delay = HZ / 20;
		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

		info->fbdefio = &fb_helper->fbdefio;
		ret = fb_deferred_io_init(info);
		if (ret)
			goto err_drm_fb_helper_release_info;
	}

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* Clear the back references before releasing the buffer */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
249 
250 /*
251  * struct drm_client_funcs
252  */
253 
254 static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
255 {
256 	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
257 
258 	if (fb_helper->info) {
259 		drm_fb_helper_unregister_info(fb_helper);
260 	} else {
261 		drm_client_release(&fb_helper->client);
262 		drm_fb_helper_unprepare(fb_helper);
263 		kfree(fb_helper);
264 	}
265 }
266 
/* Client restore callback: brings the fbdev configuration back on screen. */
static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}
273 
/*
 * Client hotplug callback. The first invocation performs the initial
 * fbdev setup; later invocations forward the event to the
 * already-initialized helper.
 */
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* Already set up: just report the hotplug event */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	/* Legacy (non-atomic) drivers need unused CRTCs/encoders turned off */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}
302 
/* DRM client callbacks for the fbdev emulation client */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};
309 
310 /**
311  * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
312  * @dev: DRM device
313  * @preferred_bpp: Preferred bits per pixel for the device.
314  *                 32 is used if this is zero.
315  *
316  * This function sets up fbdev emulation for GEM DMA drivers that support
317  * dumb buffers with a virtual address and that can be mmap'ed.
318  * drm_fbdev_dma_setup() shall be called after the DRM driver registered
319  * the new DRM device with drm_dev_register().
320  *
321  * Restore, hotplug events and teardown are all taken care of. Drivers that do
322  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
323  * Simple drivers might use drm_mode_config_helper_suspend().
324  *
325  * This function is safe to call even when there are no connectors present.
326  * Setup will be retried on the next hotplug event.
327  *
328  * The fbdev is destroyed by drm_dev_unregister().
329  */
void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return; /* fbdev emulation is best-effort; no error is reported */
	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_client_init;
	}

	/* Setup continues in the client's hotplug callback */
	drm_client_register(&fb_helper->client);

	return;

err_drm_client_init:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
EXPORT_SYMBOL(drm_fbdev_dma_setup);
358