xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision 663269cebc7157e487400c4aeee1f765546a9c98)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/fb.h>
4 
5 #include <drm/drm_drv.h>
6 #include <drm/drm_fbdev_dma.h>
7 #include <drm/drm_fb_dma_helper.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem_dma_helper.h>
11 
12 /*
13  * struct fb_ops
14  */
15 
16 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
17 {
18 	struct drm_fb_helper *fb_helper = info->par;
19 
20 	/* No need to take a ref for fbcon because it unbinds on unregister */
21 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
22 		return -ENODEV;
23 
24 	return 0;
25 }
26 
27 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
28 {
29 	struct drm_fb_helper *fb_helper = info->par;
30 
31 	if (user)
32 		module_put(fb_helper->dev->driver->fops->owner);
33 
34 	return 0;
35 }
36 
37 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
38 {
39 	struct drm_fb_helper *fb_helper = info->par;
40 
41 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
42 }
43 
/*
 * Final teardown of the fbdev emulation, run when the last reference
 * to the fb_info is dropped. Releases the deferred-I/O state, the DRM
 * fb-helper, the vmap'ed client buffer, the framebuffer, the DRM
 * client and finally the helper allocation itself — in that order.
 */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* Nothing to clean up if the helper was never tied to a device. */
	if (!fb_helper->dev)
		return;

	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
60 
/*
 * fb_ops for the non-deferred-I/O path: I/O goes straight to the
 * DMA-backed, system-memory screen buffer. Initializer order matters:
 * the __FB_DEFAULT_DMAMEM_OPS_* macros after DRM_FB_HELPER_DEFAULT_OPS
 * provide the read/write and drawing callbacks for DMA memory.
 */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
71 
/*
 * Generates the drm_fbdev_dma-prefixed deferred-I/O read/write and
 * drawing helpers used by the __FB_DEFAULT_DEFERRED_OPS_* entries in
 * drm_fbdev_dma_deferred_fb_ops below; damaged ranges/areas are
 * reported through the given drm_fb_helper damage callbacks.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
75 
76 static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
77 {
78 	struct drm_fb_helper *fb_helper = info->par;
79 	struct drm_framebuffer *fb = fb_helper->fb;
80 	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
81 
82 	if (!dma->map_noncoherent)
83 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
84 
85 	return fb_deferred_io_mmap(info, vma);
86 }
87 
/*
 * fb_ops for the deferred-I/O path: reads/writes and drawing go
 * through the drm_fbdev_dma_defio helpers generated above, so damage
 * is accumulated and flushed by the deferred-I/O worker. Initializer
 * order matters: the __FB_DEFAULT_DEFERRED_OPS_* entries after
 * DRM_FB_HELPER_DEFAULT_OPS provide the I/O and drawing callbacks.
 */
static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
98 
99 /*
100  * struct drm_fb_helper
101  */
102 
103 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
104 					 struct drm_clip_rect *clip)
105 {
106 	struct drm_device *dev = helper->dev;
107 	int ret;
108 
109 	/* Call damage handlers only if necessary */
110 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
111 		return 0;
112 
113 	if (helper->fb->funcs->dirty) {
114 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
115 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
116 			return ret;
117 	}
118 
119 	return 0;
120 }
121 
/* Helper callbacks: flush damage via the framebuffer's dirty handler. */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
125 
/*
 * struct drm_driver
 */
129 
130 int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
131 				     struct drm_fb_helper_surface_size *sizes)
132 {
133 	struct drm_client_dev *client = &fb_helper->client;
134 	struct drm_device *dev = fb_helper->dev;
135 	bool use_deferred_io = false;
136 	struct drm_client_buffer *buffer;
137 	struct drm_gem_dma_object *dma_obj;
138 	struct drm_framebuffer *fb;
139 	struct fb_info *info;
140 	u32 format;
141 	struct iosys_map map;
142 	int ret;
143 
144 	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
145 		    sizes->surface_width, sizes->surface_height,
146 		    sizes->surface_bpp);
147 
148 	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
149 					     sizes->surface_depth);
150 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
151 					       sizes->surface_height, format);
152 	if (IS_ERR(buffer))
153 		return PTR_ERR(buffer);
154 	dma_obj = to_drm_gem_dma_obj(buffer->gem);
155 
156 	fb = buffer->fb;
157 
158 	/*
159 	 * Deferred I/O requires struct page for framebuffer memory,
160 	 * which is not guaranteed for all DMA ranges. We thus only
161 	 * install deferred I/O if we have a framebuffer that requires
162 	 * it.
163 	 */
164 	if (fb->funcs->dirty)
165 		use_deferred_io = true;
166 
167 	ret = drm_client_buffer_vmap(buffer, &map);
168 	if (ret) {
169 		goto err_drm_client_buffer_delete;
170 	} else if (drm_WARN_ON(dev, map.is_iomem)) {
171 		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
172 		goto err_drm_client_buffer_delete;
173 	}
174 
175 	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
176 	fb_helper->buffer = buffer;
177 	fb_helper->fb = fb;
178 
179 	info = drm_fb_helper_alloc_info(fb_helper);
180 	if (IS_ERR(info)) {
181 		ret = PTR_ERR(info);
182 		goto err_drm_client_buffer_vunmap;
183 	}
184 
185 	drm_fb_helper_fill_info(info, fb_helper, sizes);
186 
187 	if (use_deferred_io)
188 		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
189 	else
190 		info->fbops = &drm_fbdev_dma_fb_ops;
191 
192 	/* screen */
193 	info->flags |= FBINFO_VIRTFB; /* system memory */
194 	if (dma_obj->map_noncoherent)
195 		info->flags |= FBINFO_READS_FAST; /* signal caching */
196 	info->screen_size = sizes->surface_height * fb->pitches[0];
197 	info->screen_buffer = map.vaddr;
198 	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
199 		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
200 			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
201 	}
202 	info->fix.smem_len = info->screen_size;
203 
204 	/*
205 	 * Only set up deferred I/O if the screen buffer supports
206 	 * it. If this disagrees with the previous test for ->dirty,
207 	 * mmap on the /dev/fb file might not work correctly.
208 	 */
209 	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
210 		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
211 
212 		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
213 			use_deferred_io = false;
214 	}
215 
216 	/* deferred I/O */
217 	if (use_deferred_io) {
218 		fb_helper->fbdefio.delay = HZ / 20;
219 		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
220 
221 		info->fbdefio = &fb_helper->fbdefio;
222 		ret = fb_deferred_io_init(info);
223 		if (ret)
224 			goto err_drm_fb_helper_release_info;
225 	}
226 
227 	return 0;
228 
229 err_drm_fb_helper_release_info:
230 	drm_fb_helper_release_info(fb_helper);
231 err_drm_client_buffer_vunmap:
232 	fb_helper->fb = NULL;
233 	fb_helper->buffer = NULL;
234 	drm_client_buffer_vunmap(buffer);
235 err_drm_client_buffer_delete:
236 	drm_client_framebuffer_delete(buffer);
237 	return ret;
238 }
239 EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
240