xref: /linux/drivers/gpu/drm/drm_fbdev_dma.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/fb.h>
4 
5 #include <drm/drm_crtc_helper.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_fb_dma_helper.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem_dma_helper.h>
11 
12 #include <drm/drm_fbdev_dma.h>
13 
14 /*
15  * struct fb_ops
16  */
17 
18 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
19 {
20 	struct drm_fb_helper *fb_helper = info->par;
21 
22 	/* No need to take a ref for fbcon because it unbinds on unregister */
23 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
24 		return -ENODEV;
25 
26 	return 0;
27 }
28 
29 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
30 {
31 	struct drm_fb_helper *fb_helper = info->par;
32 
33 	if (user)
34 		module_put(fb_helper->dev->driver->fops->owner);
35 
36 	return 0;
37 }
38 
/*
 * Generate the default deferred-I/O read/write and drawing helpers for
 * DMA-able system memory. Damage reporting is routed through the DRM
 * fb-helper damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
42 
43 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
44 {
45 	struct drm_fb_helper *fb_helper = info->par;
46 	struct drm_framebuffer *fb = fb_helper->fb;
47 	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
48 
49 	if (!dma->map_noncoherent)
50 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
51 
52 	return fb_deferred_io_mmap(info, vma);
53 }
54 
/*
 * Final teardown of the emulated fbdev device, run when the last
 * reference to the fb_info is dropped. Releases everything that
 * drm_fbdev_dma_helper_fb_probe() and drm_fbdev_dma_setup() acquired,
 * in reverse order of acquisition.
 */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	/*
	 * NOTE(review): a NULL dev presumably means setup never
	 * completed, so there is nothing to tear down — confirm
	 * against drm_fb_helper_prepare()/fini() semantics.
	 */
	if (!fb_helper->dev)
		return;

	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	/* Undo the vmap/framebuffer/client setup from fb_probe and setup. */
	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
71 
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	/* Deferred-I/O read/write callbacks generated above. */
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	/* Deferred-I/O fillrect/copyarea/imageblit callbacks. */
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
82 
83 /*
84  * struct drm_fb_helper
85  */
86 
/*
 * Allocate and initialize the fbdev framebuffer for the given surface
 * size: creates a DMA-backed client framebuffer, vmaps it, fills in the
 * fb_info screen fields and wires up deferred I/O. On failure, each
 * error label unwinds exactly the state acquired before the jump.
 * Returns 0 on success or a negative errno.
 */
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
					 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_gem_dma_object *dma_obj;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	/* Map the legacy bpp/depth pair onto a fourcc format. */
	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	dma_obj = to_drm_gem_dma_obj(buffer->gem);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	/* Only expose a physical address for directly-mapped memory. */
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	/* deferred I/O */
	fb_helper->fbdefio.delay = HZ / 20; /* flush damage every 50 ms */
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* Clear the helper pointers so destroy paths don't touch them. */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
168 
169 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
170 					 struct drm_clip_rect *clip)
171 {
172 	struct drm_device *dev = helper->dev;
173 	int ret;
174 
175 	/* Call damage handlers only if necessary */
176 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
177 		return 0;
178 
179 	if (helper->fb->funcs->dirty) {
180 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
181 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
182 			return ret;
183 	}
184 
185 	return 0;
186 }
187 
/* fb-helper callbacks: framebuffer creation and damage flushing. */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
192 
193 /*
194  * struct drm_client_funcs
195  */
196 
197 static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
198 {
199 	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
200 
201 	if (fb_helper->info) {
202 		drm_fb_helper_unregister_info(fb_helper);
203 	} else {
204 		drm_client_release(&fb_helper->client);
205 		drm_fb_helper_unprepare(fb_helper);
206 		kfree(fb_helper);
207 	}
208 }
209 
210 static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
211 {
212 	drm_fb_helper_lastclose(client->dev);
213 
214 	return 0;
215 }
216 
/*
 * Set up (or update) the fbdev emulation on hotplug. The first
 * successful hotplug performs the full fb-helper initialization;
 * later events only propagate the hotplug to the existing helper.
 * Returns 0 on success or a negative errno.
 */
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* Already initialized — just forward the event. */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	/* Legacy (non-atomic) drivers need unused functions disabled. */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}
245 
/* DRM client callbacks driving the fbdev emulation lifecycle. */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};
252 
253 /**
254  * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
255  * @dev: DRM device
256  * @preferred_bpp: Preferred bits per pixel for the device.
257  *                 32 is used if this is zero.
258  *
259  * This function sets up fbdev emulation for GEM DMA drivers that support
260  * dumb buffers with a virtual address and that can be mmap'ed.
261  * drm_fbdev_dma_setup() shall be called after the DRM driver registered
262  * the new DRM device with drm_dev_register().
263  *
264  * Restore, hotplug events and teardown are all taken care of. Drivers that do
265  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
266  * Simple drivers might use drm_mode_config_helper_suspend().
267  *
268  * This function is safe to call even when there are no connectors present.
269  * Setup will be retried on the next hotplug event.
270  *
271  * The fbdev is destroyed by drm_dev_unregister().
272  */
void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	/* Setup is only valid once, on an already-registered device. */
	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	/* Failure is non-fatal: the driver works without fbdev emulation. */
	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_client_init;
	}

	/* From here on, teardown happens via the client callbacks. */
	drm_client_register(&fb_helper->client);

	return;

err_drm_client_init:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
300 EXPORT_SYMBOL(drm_fbdev_dma_setup);
301