1 // SPDX-License-Identifier: MIT
2
3 #include <linux/fb.h>
4
5 #include <drm/drm_crtc_helper.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_fb_dma_helper.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem_dma_helper.h>
11
12 #include <drm/drm_fbdev_dma.h>
13
14 /*
15 * struct fb_ops
16 */
17
drm_fbdev_dma_fb_open(struct fb_info * info,int user)18 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19 {
20 struct drm_fb_helper *fb_helper = info->par;
21
22 /* No need to take a ref for fbcon because it unbinds on unregister */
23 if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 return -ENODEV;
25
26 return 0;
27 }
28
drm_fbdev_dma_fb_release(struct fb_info * info,int user)29 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30 {
31 struct drm_fb_helper *fb_helper = info->par;
32
33 if (user)
34 module_put(fb_helper->dev->driver->fops->owner);
35
36 return 0;
37 }
38
drm_fbdev_dma_fb_mmap(struct fb_info * info,struct vm_area_struct * vma)39 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
40 {
41 struct drm_fb_helper *fb_helper = info->par;
42
43 return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
44 }
45
/* .fb_destroy: final teardown of the fbdev emulation state. */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* Nothing to tear down if the helper was never fully initialized. */
	if (!fb_helper->dev)
		return;

	/* Stop deferred-I/O machinery before the helper goes away. */
	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Release buffer mapping, framebuffer, client and helper in
	 * reverse order of their creation in the probe/setup paths.
	 */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
63
/*
 * fb_ops for framebuffers without deferred I/O: read/write and drawing
 * operate on the vmap'ed DMA memory (__FB_DEFAULT_DMAMEM_OPS_*), and
 * mmap maps the GEM object directly. Initializer order matters: the
 * later designated initializers override entries set by the macros.
 */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
74
/*
 * Generates the deferred-I/O read/write and drawing fb ops for the
 * "drm_fbdev_dma" prefix (referenced below via the
 * __FB_DEFAULT_DEFERRED_OPS_* macros), wired to the DRM fb-helper
 * damage handlers for flushing.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
78
drm_fbdev_dma_deferred_fb_mmap(struct fb_info * info,struct vm_area_struct * vma)79 static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
80 {
81 struct drm_fb_helper *fb_helper = info->par;
82 struct drm_framebuffer *fb = fb_helper->fb;
83 struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
84
85 if (!dma->map_noncoherent)
86 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
87
88 return fb_deferred_io_mmap(info, vma);
89 }
90
/*
 * fb_ops for framebuffers that require deferred I/O: read/write and
 * drawing go through the generated drm_fbdev_dma deferred helpers so
 * touched areas get flushed via the damage handlers, and mmap goes
 * through fb_deferred_io. Initializer order matters: the later
 * designated initializers override entries set by the macros.
 */
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
101
102 /*
103 * struct drm_fb_helper
104 */
105
drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper * fb_helper,struct drm_fb_helper_surface_size * sizes)106 static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
107 struct drm_fb_helper_surface_size *sizes)
108 {
109 struct drm_client_dev *client = &fb_helper->client;
110 struct drm_device *dev = fb_helper->dev;
111 bool use_deferred_io = false;
112 struct drm_client_buffer *buffer;
113 struct drm_gem_dma_object *dma_obj;
114 struct drm_framebuffer *fb;
115 struct fb_info *info;
116 u32 format;
117 struct iosys_map map;
118 int ret;
119
120 drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
121 sizes->surface_width, sizes->surface_height,
122 sizes->surface_bpp);
123
124 format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
125 sizes->surface_depth);
126 buffer = drm_client_framebuffer_create(client, sizes->surface_width,
127 sizes->surface_height, format);
128 if (IS_ERR(buffer))
129 return PTR_ERR(buffer);
130 dma_obj = to_drm_gem_dma_obj(buffer->gem);
131
132 fb = buffer->fb;
133
134 /*
135 * Deferred I/O requires struct page for framebuffer memory,
136 * which is not guaranteed for all DMA ranges. We thus only
137 * install deferred I/O if we have a framebuffer that requires
138 * it.
139 */
140 if (fb->funcs->dirty)
141 use_deferred_io = true;
142
143 ret = drm_client_buffer_vmap(buffer, &map);
144 if (ret) {
145 goto err_drm_client_buffer_delete;
146 } else if (drm_WARN_ON(dev, map.is_iomem)) {
147 ret = -ENODEV; /* I/O memory not supported; use generic emulation */
148 goto err_drm_client_buffer_delete;
149 }
150
151 fb_helper->buffer = buffer;
152 fb_helper->fb = fb;
153
154 info = drm_fb_helper_alloc_info(fb_helper);
155 if (IS_ERR(info)) {
156 ret = PTR_ERR(info);
157 goto err_drm_client_buffer_vunmap;
158 }
159
160 drm_fb_helper_fill_info(info, fb_helper, sizes);
161
162 if (use_deferred_io)
163 info->fbops = &drm_fbdev_dma_deferred_fb_ops;
164 else
165 info->fbops = &drm_fbdev_dma_fb_ops;
166
167 /* screen */
168 info->flags |= FBINFO_VIRTFB; /* system memory */
169 if (dma_obj->map_noncoherent)
170 info->flags |= FBINFO_READS_FAST; /* signal caching */
171 info->screen_size = sizes->surface_height * fb->pitches[0];
172 info->screen_buffer = map.vaddr;
173 if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
174 if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
175 info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
176 }
177 info->fix.smem_len = info->screen_size;
178
179 /*
180 * Only set up deferred I/O if the screen buffer supports
181 * it. If this disagrees with the previous test for ->dirty,
182 * mmap on the /dev/fb file might not work correctly.
183 */
184 if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
185 unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
186
187 if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
188 use_deferred_io = false;
189 }
190
191 /* deferred I/O */
192 if (use_deferred_io) {
193 fb_helper->fbdefio.delay = HZ / 20;
194 fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
195
196 info->fbdefio = &fb_helper->fbdefio;
197 ret = fb_deferred_io_init(info);
198 if (ret)
199 goto err_drm_fb_helper_release_info;
200 }
201
202 return 0;
203
204 err_drm_fb_helper_release_info:
205 drm_fb_helper_release_info(fb_helper);
206 err_drm_client_buffer_vunmap:
207 fb_helper->fb = NULL;
208 fb_helper->buffer = NULL;
209 drm_client_buffer_vunmap(buffer);
210 err_drm_client_buffer_delete:
211 drm_client_framebuffer_delete(buffer);
212 return ret;
213 }
214
drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper * helper,struct drm_clip_rect * clip)215 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
216 struct drm_clip_rect *clip)
217 {
218 struct drm_device *dev = helper->dev;
219 int ret;
220
221 /* Call damage handlers only if necessary */
222 if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
223 return 0;
224
225 if (helper->fb->funcs->dirty) {
226 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
227 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
228 return ret;
229 }
230
231 return 0;
232 }
233
/* fb-helper callbacks: framebuffer creation and damage flushing. */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
238
239 /*
240 * struct drm_client_funcs
241 */
242
drm_fbdev_dma_client_unregister(struct drm_client_dev * client)243 static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
244 {
245 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
246
247 if (fb_helper->info) {
248 drm_fb_helper_unregister_info(fb_helper);
249 } else {
250 drm_client_release(&fb_helper->client);
251 drm_fb_helper_unprepare(fb_helper);
252 kfree(fb_helper);
253 }
254 }
255
/* .restore: bring back the fbdev configuration on lastclose. */
static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}
262
drm_fbdev_dma_client_hotplug(struct drm_client_dev * client)263 static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
264 {
265 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
266 struct drm_device *dev = client->dev;
267 int ret;
268
269 if (dev->fb_helper)
270 return drm_fb_helper_hotplug_event(dev->fb_helper);
271
272 ret = drm_fb_helper_init(dev, fb_helper);
273 if (ret)
274 goto err_drm_err;
275
276 if (!drm_drv_uses_atomic_modeset(dev))
277 drm_helper_disable_unused_functions(dev);
278
279 ret = drm_fb_helper_initial_config(fb_helper);
280 if (ret)
281 goto err_drm_fb_helper_fini;
282
283 return 0;
284
285 err_drm_fb_helper_fini:
286 drm_fb_helper_fini(fb_helper);
287 err_drm_err:
288 drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
289 return ret;
290 }
291
/* DRM client callbacks for the fbdev emulation lifecycle. */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};
298
299 /**
300 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
301 * @dev: DRM device
302 * @preferred_bpp: Preferred bits per pixel for the device.
303 * 32 is used if this is zero.
304 *
305 * This function sets up fbdev emulation for GEM DMA drivers that support
306 * dumb buffers with a virtual address and that can be mmap'ed.
307 * drm_fbdev_dma_setup() shall be called after the DRM driver registered
308 * the new DRM device with drm_dev_register().
309 *
310 * Restore, hotplug events and teardown are all taken care of. Drivers that do
311 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
312 * Simple drivers might use drm_mode_config_helper_suspend().
313 *
314 * This function is safe to call even when there are no connectors present.
315 * Setup will be retried on the next hotplug event.
316 *
317 * The fbdev is destroyed by drm_dev_unregister().
318 */
drm_fbdev_dma_setup(struct drm_device * dev,unsigned int preferred_bpp)319 void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
320 {
321 struct drm_fb_helper *fb_helper;
322 int ret;
323
324 drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
325 drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
326
327 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
328 if (!fb_helper)
329 return;
330 drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
331
332 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
333 if (ret) {
334 drm_err(dev, "Failed to register client: %d\n", ret);
335 goto err_drm_client_init;
336 }
337
338 drm_client_register(&fb_helper->client);
339
340 return;
341
342 err_drm_client_init:
343 drm_fb_helper_unprepare(fb_helper);
344 kfree(fb_helper);
345 }
346 EXPORT_SYMBOL(drm_fbdev_dma_setup);
347