// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/module.h>

#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

MODULE_IMPORT_NS("DMA_BUF");

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires a kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop the reference taken at allocation - the handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			obj->handle_count);

	/*
	 * Do not release a memory region that came from an exporter;
	 * the exporter releases it once the dma-buf refcount drops to 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release the file pointer to the gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
	.free = exynos_drm_gem_free_object,
	.get_sg_table = exynos_drm_gem_prime_get_sg_table,
	.mmap = exynos_drm_gem_mmap,
	.vm_ops = &exynos_drm_gem_vm_ops,
};

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	obj->funcs = &exynos_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %p\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}
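
/*
 * Roughly how userspace reaches the two ioctl handlers below: a minimal
 * sketch, assuming libdrm's drmIoctl(), an already-opened DRM fd and the
 * Exynos UAPI header (install path depends on the setup); the size, flags
 * and omitted error handling are illustrative only.
 *
 *	#include <sys/mman.h>
 *	#include <xf86drm.h>
 *	#include <drm/exynos_drm.h>
 *
 *	struct drm_exynos_gem_create create = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	struct drm_exynos_gem_map map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */
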
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vm_flags_clear(vma, VM_PFNMAP);
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if the user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
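
/*
 * The dumb-buffer callback below is the generic, driver-independent
 * allocation path. A minimal userspace sketch, again assuming libdrm's
 * drmIoctl() and an open DRM fd; the dimensions are illustrative only.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */
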
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	ret = drm_mode_size_dumb(dev, args, 0, 0);
	if (ret)
		return ret;

	/*
	 * Allocate memory to be used as a framebuffer.
	 * This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cacheable mapping by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * The buffer has been mapped contiguously into the DMA address space,
	 * but with an IOMMU the backing pages may be either CONTIG or
	 * NONCONTIG. Assume the simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}
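
/*
 * The PRIME helpers above are typically exercised when userspace converts
 * between GEM handles and dma-buf file descriptors; a minimal sketch,
 * assuming libdrm's PRIME wrappers and an open DRM fd:
 *
 *	uint32_t handle;
 *	int dmabuf_fd;
 *
 *	drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &dmabuf_fd);
 *	drmPrimeFDToHandle(fd, dmabuf_fd, &handle);
 *
 * Exporting a buffer ends up in the get_sg_table helper when the importer
 * maps it; importing a foreign dma-buf ends up in the import helpers above.
 */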