xref: /linux/drivers/gpu/drm/omapdrm/omap_fbdev.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
4  * Author: Rob Clark <rob@ti.com>
5  */
6 
7 #include <linux/fb.h>
8 
9 #include <drm/clients/drm_client_setup.h>
10 #include <drm/drm_drv.h>
11 #include <drm/drm_crtc_helper.h>
12 #include <drm/drm_fb_helper.h>
13 #include <drm/drm_file.h>
14 #include <drm/drm_fourcc.h>
15 #include <drm/drm_framebuffer.h>
16 #include <drm/drm_gem_framebuffer_helper.h>
17 #include <drm/drm_managed.h>
18 #include <drm/drm_print.h>
19 #include <drm/drm_util.h>
20 
21 #include "omap_drv.h"
22 #include "omap_fbdev.h"
23 
/*
 * Module parameter "ywrap": enables DMM-based ywrap scrolling when the
 * hardware has a DMM (see omap_fbdev_driver_fbdev_probe() / pan_worker()).
 */
MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
static bool ywrap_enabled = true;
module_param_named(ywrap, ywrap_enabled, bool, 0644);
27 
28 /*
29  * fbdev funcs, to implement legacy fbdev interface on top of drm driver
30  */
31 
/* Per-device fbdev emulation state, allocated in omap_fbdev_setup(). */
struct omap_fbdev {
	struct drm_device *dev;		/* owning DRM device */
	bool ywrap_enabled;		/* true when DMM ywrap scrolling is in use */

	/* for deferred dmm roll when getting called in atomic ctx */
	struct work_struct work;
};
39 
40 static struct drm_fb_helper *get_fb(struct fb_info *fbi);
41 
42 static void pan_worker(struct work_struct *work)
43 {
44 	struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
45 	struct drm_fb_helper *helper = fbdev->dev->fb_helper;
46 	struct fb_info *fbi = helper->info;
47 	struct drm_gem_object *bo = drm_gem_fb_get_obj(helper->fb, 0);
48 	int npages;
49 
50 	/* DMM roll shifts in 4K pages: */
51 	npages = fbi->fix.line_length >> PAGE_SHIFT;
52 	omap_gem_roll(bo, fbi->var.yoffset * npages);
53 }
54 
/*
 * Generates the omap_fbdev_defio_* read/write/draw wrappers used by the
 * __FB_DEFAULT_DEFERRED_OPS_* entries in omap_fb_ops below, routing damage
 * through the drm_fb_helper damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area)
58 
59 static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
60 {
61 	struct drm_fb_helper *helper = get_fb(fbi);
62 	struct omap_drm_private *priv;
63 	struct omap_fbdev *fbdev;
64 
65 	if (!helper)
66 		goto fallback;
67 
68 	priv = helper->dev->dev_private;
69 	fbdev = priv->fbdev;
70 
71 	if (!fbdev->ywrap_enabled)
72 		goto fallback;
73 
74 	if (drm_can_sleep())
75 		pan_worker(&fbdev->work);
76 	else
77 		queue_work(priv->wq, &fbdev->work);
78 
79 	return 0;
80 
81 fallback:
82 	return drm_fb_helper_pan_display(var, fbi);
83 }
84 
85 static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
86 {
87 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
88 
89 	return fb_deferred_io_mmap(info, vma);
90 }
91 
/*
 * Final teardown of the emulated fbdev; releases everything set up in
 * omap_fbdev_driver_fbdev_probe(), in reverse order.
 */
static void omap_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *helper = info->par;
	struct drm_framebuffer *fb = helper->fb;
	struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);

	DBG();

	/* stop deferred I/O before tearing down the helper it flushes into */
	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(helper);

	/* drop the pin taken at probe time, then remove the framebuffer */
	omap_gem_unpin(bo);
	drm_framebuffer_remove(fb);

	drm_client_release(&helper->client);
}
108 
109 /*
110  * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
111  * because we use write-combine.
112  */
/*
 * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
 * because we use write-combine.
 */
static const struct fb_ops omap_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
	/* mode setting / cmap / blank go through the generic DRM fb helper */
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_setcmap	= drm_fb_helper_setcmap,
	.fb_blank	= drm_fb_helper_blank,
	/* custom pan so ywrap scrolling can use the DMM (see above) */
	.fb_pan_display = omap_fbdev_pan_display,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
	.fb_ioctl	= drm_fb_helper_ioctl,
	/* custom mmap for the write-combine mapping */
	.fb_mmap	= omap_fbdev_fb_mmap,
	.fb_destroy	= omap_fbdev_fb_destroy,
};
126 
127 static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
128 {
129 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
130 		return 0;
131 
132 	if (helper->fb->funcs->dirty)
133 		return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
134 
135 	return 0;
136 }
137 
/* Helper hooks installed on the drm_fb_helper at probe time. */
static const struct drm_fb_helper_funcs omap_fbdev_helper_funcs = {
	.fb_dirty = omap_fbdev_dirty,
};
141 
142 static struct drm_fb_helper *get_fb(struct fb_info *fbi)
143 {
144 	if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
145 		/* these are not the fb's you're looking for */
146 		return NULL;
147 	}
148 	return fbi->par;
149 }
150 
151 int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
152 				  struct drm_fb_helper_surface_size *sizes)
153 {
154 	struct drm_device *dev = helper->dev;
155 	struct omap_drm_private *priv = dev->dev_private;
156 	struct omap_fbdev *fbdev = priv->fbdev;
157 	struct drm_framebuffer *fb = NULL;
158 	union omap_gem_size gsize;
159 	struct fb_info *fbi = NULL;
160 	struct drm_mode_fb_cmd2 mode_cmd = {0};
161 	struct drm_gem_object *bo;
162 	dma_addr_t dma_addr;
163 	int ret;
164 
165 	sizes->surface_bpp = 32;
166 	sizes->surface_depth = 24;
167 
168 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
169 			sizes->surface_height, sizes->surface_bpp,
170 			sizes->fb_width, sizes->fb_height);
171 
172 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
173 			sizes->surface_depth);
174 
175 	mode_cmd.width = sizes->surface_width;
176 	mode_cmd.height = sizes->surface_height;
177 
178 	mode_cmd.pitches[0] =
179 			DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
180 
181 	fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
182 	if (fbdev->ywrap_enabled) {
183 		/* need to align pitch to page size if using DMM scrolling */
184 		mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
185 	}
186 
187 	/* allocate backing bo */
188 	gsize = (union omap_gem_size){
189 		.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
190 	};
191 	DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
192 	bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
193 	if (!bo) {
194 		dev_err(dev->dev, "failed to allocate buffer object\n");
195 		ret = -ENOMEM;
196 		goto fail;
197 	}
198 
199 	fb = omap_framebuffer_init(dev,
200 				   drm_get_format_info(dev, mode_cmd.pixel_format,
201 						       mode_cmd.modifier[0]),
202 				   &mode_cmd, &bo);
203 	if (IS_ERR(fb)) {
204 		dev_err(dev->dev, "failed to allocate fb\n");
205 		/* note: if fb creation failed, we can't rely on fb destroy
206 		 * to unref the bo:
207 		 */
208 		drm_gem_object_put(bo);
209 		ret = PTR_ERR(fb);
210 		goto fail;
211 	}
212 
213 	/* note: this keeps the bo pinned.. which is perhaps not ideal,
214 	 * but is needed as long as we use fb_mmap() to mmap to userspace
215 	 * (since this happens using fix.smem_start).  Possibly we could
216 	 * implement our own mmap using GEM mmap support to avoid this
217 	 * (non-tiled buffer doesn't need to be pinned for fbcon to write
218 	 * to it).  Then we just need to be sure that we are able to re-
219 	 * pin it in case of an opps.
220 	 */
221 	ret = omap_gem_pin(bo, &dma_addr);
222 	if (ret) {
223 		dev_err(dev->dev, "could not pin framebuffer\n");
224 		ret = -ENOMEM;
225 		goto fail;
226 	}
227 
228 	fbi = drm_fb_helper_alloc_info(helper);
229 	if (IS_ERR(fbi)) {
230 		dev_err(dev->dev, "failed to allocate fb info\n");
231 		ret = PTR_ERR(fbi);
232 		goto fail;
233 	}
234 
235 	DBG("fbi=%p, dev=%p", fbi, dev);
236 
237 	helper->funcs = &omap_fbdev_helper_funcs;
238 	helper->fb = fb;
239 
240 	fbi->fbops = &omap_fb_ops;
241 
242 	drm_fb_helper_fill_info(fbi, helper, sizes);
243 
244 	fbi->flags |= FBINFO_VIRTFB;
245 	fbi->screen_buffer = omap_gem_vaddr(bo);
246 	fbi->screen_size = bo->size;
247 	fbi->fix.smem_start = dma_addr;
248 	fbi->fix.smem_len = bo->size;
249 
250 	/* deferred I/O */
251 	helper->fbdefio.delay = HZ / 20;
252 	helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
253 
254 	fbi->fbdefio = &helper->fbdefio;
255 	ret = fb_deferred_io_init(fbi);
256 	if (ret)
257 		goto fail;
258 
259 	/* if we have DMM, then we can use it for scrolling by just
260 	 * shuffling pages around in DMM rather than doing sw blit.
261 	 */
262 	if (fbdev->ywrap_enabled) {
263 		DRM_INFO("Enabling DMM ywrap scrolling\n");
264 		fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
265 		fbi->fix.ywrapstep = 1;
266 	}
267 
268 
269 	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
270 	DBG("allocated %dx%d fb", fb->width, fb->height);
271 
272 	return 0;
273 
274 fail:
275 
276 	if (ret) {
277 		if (fb)
278 			drm_framebuffer_remove(fb);
279 	}
280 
281 	return ret;
282 }
283 
284 void omap_fbdev_setup(struct drm_device *dev)
285 {
286 	struct omap_drm_private *priv = dev->dev_private;
287 	struct omap_fbdev *fbdev;
288 
289 	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
290 	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
291 
292 	fbdev = drmm_kzalloc(dev, sizeof(*fbdev), GFP_KERNEL);
293 	if (!fbdev)
294 		return;
295 	fbdev->dev = dev;
296 	INIT_WORK(&fbdev->work, pan_worker);
297 
298 	priv->fbdev = fbdev;
299 
300 	drm_client_setup(dev, NULL);
301 }
302