// SPDX-License-Identifier: MIT

#include <linux/fb.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

#include <drm/drm_fbdev_dma.h>

/*
 * struct fb_ops
 */

static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);

static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);

	if (!dma->map_noncoherent)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return fb_deferred_io_mmap(info, vma);
}

static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (!fb_helper->dev)
		return;

	/* Tear down deferred I/O before releasing the framebuffer. */
	fb_deferred_io_cleanup(info);
	drm_fb_helper_fini(fb_helper);

	drm_client_buffer_vunmap(fb_helper->buffer);
	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};

/*
 * struct drm_fb_helper
 */

static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
					 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_gem_dma_object *dma_obj;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	dma_obj = to_drm_gem_dma_obj(buffer->gem);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	info->fix.smem_len = info->screen_size;

	/* deferred I/O */
	fb_helper->fbdefio.delay = HZ / 20;
	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

	info->fbdefio = &fb_helper->fbdefio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}

static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
					 struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};

/*
 * struct drm_client_funcs
 */

static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_client_release(&fb_helper->client);
		drm_fb_helper_unprepare(fb_helper);
		kfree(fb_helper);
	}
}

static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};

/**
 * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 32 is used if this is zero.
 *
 * This function sets up fbdev emulation for GEM DMA drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
 * drm_fbdev_dma_setup() shall be called after the DRM driver registered
 * the new DRM device with drm_dev_register().
 *
 * Restore, hotplug events and teardown are all taken care of. Drivers that do
 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
 * Simple drivers might use drm_mode_config_helper_suspend().
 *
 * This function is safe to call even when there are no connectors present.
 * Setup will be retried on the next hotplug event.
 *
 * The fbdev is destroyed by drm_dev_unregister().
 */
void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_client_init;
	}

	drm_client_register(&fb_helper->client);

	return;

err_drm_client_init:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}
EXPORT_SYMBOL(drm_fbdev_dma_setup);
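
/*
 * Usage sketch (illustrative only, not built as part of this file): per the
 * kernel-doc above, a GEM DMA driver enables the emulation right after it has
 * registered the device with drm_dev_register(). The function and variable
 * names below are hypothetical.
 */
#if 0	/* example only */
static int example_drm_driver_register(struct drm_device *drm)
{
	int ret;

	/* Register the device first; the fbdev client attaches afterwards. */
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* Passing 0 selects the documented default of 32 bits per pixel. */
	drm_fbdev_dma_setup(drm, 0);

	return 0;
}
#endif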