xref: /linux/drivers/gpu/drm/msm/msm_fbdev.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <drm/drm_crtc.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_fourcc.h>
10 
11 #include "msm_drv.h"
12 #include "msm_kms.h"
13 
14 extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
15 					struct vm_area_struct *vma);
16 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
17 
18 /*
19  * fbdev funcs, to implement legacy fbdev interface on top of drm driver
20  */
21 
/* Upcast from the embedded drm_fb_helper back to the wrapping msm_fbdev. */
#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)

/*
 * Per-device fbdev emulation state: the generic DRM fb helper plus the
 * framebuffer allocated for the legacy fbdev interface.
 */
struct msm_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;	/* set by msm_fbdev_create() */
};
28 
static const struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,

	/* Note: to properly handle manual update displays, we wrap the
	 * basic fbdev ops which write to the framebuffer
	 */
	.fb_read = drm_fb_helper_sys_read,
	.fb_write = drm_fb_helper_sys_write,
	.fb_fillrect = drm_fb_helper_sys_fillrect,
	.fb_copyarea = drm_fb_helper_sys_copyarea,
	.fb_imageblit = drm_fb_helper_sys_imageblit,
	/* custom mmap so the GEM object backing the fb is mapped correctly */
	.fb_mmap = msm_fbdev_mmap,
};
43 
44 static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
45 {
46 	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
47 	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
48 	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
49 	int ret = 0;
50 
51 	ret = drm_gem_mmap_obj(bo, bo->size, vma);
52 	if (ret) {
53 		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
54 		return ret;
55 	}
56 
57 	return msm_gem_mmap_obj(bo, vma);
58 }
59 
60 static int msm_fbdev_create(struct drm_fb_helper *helper,
61 		struct drm_fb_helper_surface_size *sizes)
62 {
63 	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
64 	struct drm_device *dev = helper->dev;
65 	struct msm_drm_private *priv = dev->dev_private;
66 	struct drm_framebuffer *fb = NULL;
67 	struct drm_gem_object *bo;
68 	struct fb_info *fbi = NULL;
69 	uint64_t paddr;
70 	uint32_t format;
71 	int ret, pitch;
72 
73 	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
74 
75 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
76 			sizes->surface_height, sizes->surface_bpp,
77 			sizes->fb_width, sizes->fb_height);
78 
79 	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
80 	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
81 			sizes->surface_height, pitch, format);
82 
83 	if (IS_ERR(fb)) {
84 		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
85 		return PTR_ERR(fb);
86 	}
87 
88 	bo = msm_framebuffer_bo(fb, 0);
89 
90 	mutex_lock(&dev->struct_mutex);
91 
92 	/*
93 	 * NOTE: if we can be guaranteed to be able to map buffer
94 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
95 	 * buffer now:
96 	 */
97 	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
98 	if (ret) {
99 		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
100 		goto fail_unlock;
101 	}
102 
103 	fbi = drm_fb_helper_alloc_fbi(helper);
104 	if (IS_ERR(fbi)) {
105 		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
106 		ret = PTR_ERR(fbi);
107 		goto fail_unlock;
108 	}
109 
110 	DBG("fbi=%p, dev=%p", fbi, dev);
111 
112 	fbdev->fb = fb;
113 	helper->fb = fb;
114 
115 	fbi->fbops = &msm_fb_ops;
116 
117 	drm_fb_helper_fill_info(fbi, helper, sizes);
118 
119 	dev->mode_config.fb_base = paddr;
120 
121 	fbi->screen_base = msm_gem_get_vaddr(bo);
122 	if (IS_ERR(fbi->screen_base)) {
123 		ret = PTR_ERR(fbi->screen_base);
124 		goto fail_unlock;
125 	}
126 	fbi->screen_size = bo->size;
127 	fbi->fix.smem_start = paddr;
128 	fbi->fix.smem_len = bo->size;
129 
130 	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
131 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
132 
133 	mutex_unlock(&dev->struct_mutex);
134 
135 	return 0;
136 
137 fail_unlock:
138 	mutex_unlock(&dev->struct_mutex);
139 	drm_framebuffer_remove(fb);
140 	return ret;
141 }
142 
/* fb helper callbacks: only .fb_probe is needed for this driver */
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
	.fb_probe = msm_fbdev_create,
};
146 
147 /* initialize fbdev helper */
148 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
149 {
150 	struct msm_drm_private *priv = dev->dev_private;
151 	struct msm_fbdev *fbdev = NULL;
152 	struct drm_fb_helper *helper;
153 	int ret;
154 
155 	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
156 	if (!fbdev)
157 		goto fail;
158 
159 	helper = &fbdev->base;
160 
161 	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
162 
163 	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
164 	if (ret) {
165 		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
166 		goto fail;
167 	}
168 
169 	ret = drm_fb_helper_single_add_all_connectors(helper);
170 	if (ret)
171 		goto fini;
172 
173 	/* the fw fb could be anywhere in memory */
174 	drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
175 
176 	ret = drm_fb_helper_initial_config(helper, 32);
177 	if (ret)
178 		goto fini;
179 
180 	priv->fbdev = helper;
181 
182 	return helper;
183 
184 fini:
185 	drm_fb_helper_fini(helper);
186 fail:
187 	kfree(fbdev);
188 	return NULL;
189 }
190 
191 void msm_fbdev_free(struct drm_device *dev)
192 {
193 	struct msm_drm_private *priv = dev->dev_private;
194 	struct drm_fb_helper *helper = priv->fbdev;
195 	struct msm_fbdev *fbdev;
196 
197 	DBG();
198 
199 	drm_fb_helper_unregister_fbi(helper);
200 
201 	drm_fb_helper_fini(helper);
202 
203 	fbdev = to_msm_fbdev(priv->fbdev);
204 
205 	/* this will free the backing object */
206 	if (fbdev->fb) {
207 		struct drm_gem_object *bo =
208 			msm_framebuffer_bo(fbdev->fb, 0);
209 		msm_gem_put_vaddr(bo);
210 		drm_framebuffer_remove(fbdev->fb);
211 	}
212 
213 	kfree(fbdev);
214 
215 	priv->fbdev = NULL;
216 }
217