// SPDX-License-Identifier: MIT

#include <linux/fb.h>

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

/*
 * struct fb_ops
 */

static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;

	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}

static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (!fb_helper->dev)
		return;

	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};

FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);

static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);

	/* Map coherent buffers write-combined; non-coherent buffers stay cacheable. */
	if (!dma->map_noncoherent)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return fb_deferred_io_mmap(info, vma);
}

static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};

/*
 * struct drm_fb_helper
 */

static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
					 struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};

/*
 * struct drm_fb_helper
 */

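/**
 * drm_fbdev_dma_driver_fbdev_probe - Create fbdev emulation on a DMA-backed buffer
 * @fb_helper: fbdev helper structure
 * @sizes: requested dimensions and depth of the fbdev surface
 *
 * Allocates a DMA-backed client framebuffer, maps it into kernel address
 * space and fills in the fbdev info structure. Deferred I/O is only set up
 * if the framebuffer provides a dirty callback and the buffer memory is
 * backed by struct page. Drivers based on the DMA GEM helpers can use this
 * function as their &drm_driver.fbdev_probe callback.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */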
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	bool use_deferred_io = false;
	struct drm_client_buffer *buffer;
	struct drm_gem_dma_object *dma_obj;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	dma_obj = to_drm_gem_dma_obj(buffer->gem);

	fb = buffer->fb;

	/*
	 * Deferred I/O requires struct page for framebuffer memory,
	 * which is not guaranteed for all DMA ranges. We thus only
	 * install deferred I/O if we have a framebuffer that requires
	 * it.
	 */
	if (fb->funcs->dirty)
		use_deferred_io = true;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	if (use_deferred_io)
		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
	else
		info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	/*
	 * Only set up deferred I/O if the screen buffer supports
	 * it. If this disagrees with the previous test for ->dirty,
	 * mmap on the /dev/fb file might not work correctly.
	 */
	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;

		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
			use_deferred_io = false;
	}

	/* deferred I/O */
	if (use_deferred_io) {
		fb_helper->fbdefio.delay = HZ / 20;
		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

		info->fbdefio = &fb_helper->fbdefio;
		ret = fb_deferred_io_init(info);
		if (ret)
			goto err_drm_fb_helper_release_info;
	}

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
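
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * built on the DMA GEM helpers would typically wire up this fbdev emulation
 * through its struct drm_driver, e.g. via the DRM_FBDEV_DMA_DRIVER_OPS macro
 * from <drm/drm_fbdev_dma.h>. The foo_* names below are placeholders, and
 * the exact client-setup call depends on the kernel version in use.
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_DMA_DRIVER_OPS,
 *		DRM_FBDEV_DMA_DRIVER_OPS,  // sets .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe
 *		...
 *	};
 */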