xref: /linux/drivers/gpu/drm/gma500/fbdev.c (revision 447281e71527080244ed1fa07e84a9d4e0654616)
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/fb.h>
#include <linux/pfn_t.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * struct vm_operations_struct
 */

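/*
 * Fault handler for the deferred framebuffer mapping: on the first fault,
 * insert every page of the stolen-memory framebuffer into the VMA as
 * uncached mixed pages, starting from the PFN of info->fix.smem_start.
 */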
static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct fb_info *info = vma->vm_private_data;
	unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
	unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
	vm_fault_t err = VM_FAULT_SIGBUS;
	unsigned long page_num = vma_pages(vma);
	unsigned long i;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; ++i) {
		err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(err & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		++pfn;
	}

	return err;
}

static const struct vm_operations_struct psb_fbdev_vm_ops = {
	.fault	= psb_fbdev_vm_fault,
};

/*
 * struct fb_ops
 */

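/* Scale a 16-bit fbdev color component down to a _width-bit hardware value. */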
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

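/*
 * Convert a 16-bit-per-channel palette entry to the framebuffer's pixel
 * layout and store the first 16 entries in the pseudo palette used by the
 * fbdev drawing helpers.
 */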
static int psb_fbdev_fb_setcolreg(unsigned int regno,
				  unsigned int red, unsigned int green,
				  unsigned int blue, unsigned int transp,
				  struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

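/*
 * Only mappings of the whole framebuffer at offset 0 are supported; the
 * actual page-table setup is deferred to the fault handler above.
 */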
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psb_fbdev_vm_ops;
	vma->vm_private_data = info;
	vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

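/*
 * Release the fbdev framebuffer: finalize the fb_helper, unregister and
 * free the DRM framebuffer, drop the reference on the backing GEM object,
 * then release and free the client state.
 */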
static void psb_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = fb->obj[0];

	drm_fb_helper_fini(fb_helper);

	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);

	drm_gem_object_put(obj);

	drm_client_release(&fb_helper->client);

	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops psb_fbdev_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_IO_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psb_fbdev_fb_setcolreg,
	__FB_DEFAULT_IO_OPS_DRAW,
	.fb_mmap = psb_fbdev_fb_mmap,
	.fb_destroy = psb_fbdev_fb_destroy,
};

/*
 * struct drm_fb_helper_funcs
 */

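/*
 * Create the fbdev framebuffer: allocate a stolen-memory GEM object large
 * enough for the surface, wrap it in a DRM framebuffer and fill in the
 * fbdev info structure. Falls back to 16 bpp if a 32-bpp framebuffer does
 * not fit into stolen memory.
 */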
static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = { };
	int size;
	int ret;
	struct psb_gem_object *backing;
	struct drm_gem_object *obj;
	u32 bpp, depth;

	/* No 24-bit packed pixel modes; use 32 bpp with a depth of 24 instead */
	if (sizes->surface_bpp == 24) {
		sizes->surface_bpp = 32;
		sizes->surface_depth = 24;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/*
	 * If the mode does not fit into stolen memory at 32 bpp, fall back
	 * to 16 bpp so that a console is still available at full resolution.
	 * The X mode-setting server will allocate its own 32-bpp GEM
	 * framebuffer.
	 */
	size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
		     sizes->surface_height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	/* Allocate the framebuffer in the GTT with stolen page backing */
	backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
	if (IS_ERR(backing))
		return PTR_ERR(backing);
	obj = &backing->base;

	fb = psb_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_drm_gem_object_put;
	}

	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_framebuffer_unregister_private;
	}

	info->fbops = &psb_fbdev_fb_ops;
	info->flags = FBINFO_DEFAULT;
	/* Stolen memory is accessed directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.smem_start = dev_priv->stolen_base + backing->offset;
	info->fix.smem_len = size;
	info->fix.ywrapstep = 0;
	info->fix.ypanstep = 0;
	info->fix.mmio_start = pci_resource_start(pdev, 0);
	info->fix.mmio_len = pci_resource_len(pdev, 0);

	fb_memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;

err_drm_framebuffer_unregister_private:
	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);
err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
	.fb_probe = psb_fbdev_fb_probe,
};

/*
 * struct drm_client_funcs and setup code
 */

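/*
 * If the fbdev framebuffer was registered, unregister it here and let
 * psb_fbdev_fb_destroy() perform the final cleanup; otherwise free the
 * unprepared fb_helper immediately.
 */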
static void psb_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_fb_helper_unprepare(fb_helper);
		drm_client_release(&fb_helper->client);
		kfree(fb_helper);
	}
}

static int psb_fbdev_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

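/*
 * The first hotplug event performs the initial fbdev setup; subsequent
 * events are forwarded to the registered fb_helper.
 */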
static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs psb_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= psb_fbdev_client_unregister,
	.restore	= psb_fbdev_client_restore,
	.hotplug	= psb_fbdev_client_hotplug,
};

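/*
 * Set up fbdev emulation: prepare an fb_helper with a preferred bpp of 32
 * and register it as a DRM client. The framebuffer itself is created later
 * from the client's hotplug callback.
 */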
void psb_fbdev_setup(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct drm_fb_helper *fb_helper;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_fb_helper_unprepare;
	}

	drm_client_register(&fb_helper->client);

	return;

err_drm_fb_helper_unprepare:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
339