/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

struct exynos_drm_fbdev {
	struct drm_fb_helper	drm_fb_helper;
	struct exynos_drm_gem	*exynos_gem;
};

static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     &exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap	= exynos_drm_fb_mmap,
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes,
				   struct exynos_drm_gem *exynos_gem)
{
	struct fb_info *fbi;
	struct drm_framebuffer *fb = helper->fb;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned int nr_pages;
	unsigned long offset;

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		DRM_ERROR("failed to allocate fb info.\n");
		return PTR_ERR(fbi);
	}

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	if (!exynos_gem->kvaddr) {
		DRM_ERROR("failed to map pages to kernel space.\n");
		drm_fb_helper_release_fbi(helper);
		return -EIO;
	}

	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	fbi->screen_base = exynos_gem->kvaddr + offset;
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}
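
/*
 * Illustration only (not part of this driver): once the fb_info set up by
 * exynos_drm_fbdev_update() above has been registered, a user-space client
 * that maps the emulated fbdev node ends up in exynos_drm_fb_mmap() via
 * exynos_drm_fb_ops.fb_mmap, roughly like this (error handling omitted,
 * device node name assumed):
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	struct fb_fix_screeninfo fix;
 *	ioctl(fd, FBIOGET_FSCREENINFO, &fix);
 *	void *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */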
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				   struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
	struct exynos_drm_gem *exynos_gem;
	struct drm_device *dev = helper->dev;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct platform_device *pdev = dev->platformdev;
	unsigned long size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;

	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and IOMMU is
	 * supported, fall back to a non physically contiguous buffer.
	 */
	if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
						   size);
	}

	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	exynos_fbdev->exynos_gem = exynos_gem;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
	if (IS_ERR(helper->fb)) {
		DRM_ERROR("failed to create drm framebuffer.\n");
		ret = PTR_ERR(helper->fb);
		goto err_destroy_gem;
	}

	ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem);
	if (ret < 0)
		goto err_destroy_framebuffer;

	return ret;

err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem);

	/*
	 * If this fails, all resources allocated above will be released by
	 * drm_mode_config_cleanup(), since drm_load() has been called prior
	 * to any specific driver such as the fimd or hdmi driver.
	 */

	return ret;
}

static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};

static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	bool ret = false;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->status != connector_status_connected)
			continue;

		ret = true;
		break;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	if (!exynos_drm_fbdev_is_anything_connected(dev))
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	private->fb_helper = helper = &fbdev->drm_fb_helper;

	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;
	}

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
	struct drm_framebuffer *fb;

	if (exynos_gem->kvaddr)
		vunmap(exynos_gem->kvaddr);

	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
		if (fb) {
			drm_framebuffer_unregister_private(fb);
			drm_framebuffer_remove(fb);
		}
	}

	drm_fb_helper_unregister_fbi(fb_helper);
	drm_fb_helper_release_fbi(fb_helper);

	drm_fb_helper_fini(fb_helper);
}

void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
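
/*
 * Hotplug callback for the fbdev emulation.  Elsewhere in the driver this
 * is hooked up as the .output_poll_changed callback of the mode config
 * funcs, so it runs whenever drm_kms_helper_hotplug_event() reports a
 * connector status change.  If the fbdev was never created (nothing was
 * connected when exynos_drm_fbdev_init() ran at load time), it is created
 * lazily here.
 */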
void exynos_drm_output_poll_changed(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *fb_helper = private->fb_helper;

	if (fb_helper)
		drm_fb_helper_hotplug_event(fb_helper);
	else
		exynos_drm_fbdev_init(dev);
}
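
/*
 * Sketch only (assumed call sites, not defined in this file): the entry
 * points above are wired up from the Exynos DRM core, roughly as follows.
 * exynos_drm_fbdev_init() is called from the driver load path once CRTCs
 * and connectors have been registered, exynos_drm_fbdev_fini() from the
 * unload path, and exynos_drm_fbdev_restore_mode() from the lastclose
 * hook, e.g.
 *
 *	static void exynos_drm_lastclose(struct drm_device *dev)
 *	{
 *		exynos_drm_fbdev_restore_mode(dev);
 *	}
 */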