// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct folio **folios;

	/*
	 * Unlike folios, pinned_folios is only used for unpinning, so
	 * nr_pinned is not necessarily equal to pagecount: pinned_folios
	 * records each folio pinned during udmabuf_create(). Since a folio
	 * may be pinned multiple times, it can appear in pinned_folios
	 * multiple times, once for each pin.
	 */
	pgoff_t nr_pinned;
	struct folio **pinned_folios;

	struct sg_table *sg;
	struct miscdevice *device;
	pgoff_t *offsets;
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;
	unsigned long addr, pfn;
	vm_fault_t ret;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;

	pfn = folio_pfn(ubuf->folios[pgoff]);
	pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

	ret = vmf_insert_pfn(vma, vmf->address, pfn);
	if (ret & VM_FAULT_ERROR)
		return ret;

	/* Pre-fault the remaining pages of the mapping. */
	pgoff = vma->vm_pgoff;
	addr = vma->vm_start;

	for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
		if (addr == vmf->address)
			continue;

		if (WARN_ON(pgoff >= ubuf->pagecount))
			break;

		pfn = folio_pfn(ubuf->folios[pgoff]);
		pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

		/*
		 * If the below vmf_insert_pfn() fails, we do not return an
		 * error here during this pre-fault step. However, an error
		 * will be returned if the failure occurs when the addr is
		 * truly accessed.
		 */
		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
			break;
	}

	return ret;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};
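/*
 * The fault handler above inserts raw PFNs (the mapping is VM_PFNMAP), so no
 * struct page refcounting happens through the VMA; the backing folios stay
 * alive via the pins taken at creation time. A minimal userspace sketch of
 * mapping a udmabuf (illustrative only; "buffd" is assumed to be a dma-buf
 * fd returned by the UDMABUF_CREATE ioctl shown at the end of this file):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       buffd, 0);
 *
 * MAP_SHARED is required: mmap_udmabuf() below rejects mappings without
 * VM_SHARED/VM_MAYSHARE.
 */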
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	struct page **pages;
	void *vaddr;
	pgoff_t pg;

	dma_resv_assert_held(buf->resv);

	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (pg = 0; pg < ubuf->pagecount; pg++)
		pages[pg] = folio_page(ubuf->folios[pg],
				       ubuf->offsets[pg] >> PAGE_SHIFT);

	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
	kvfree(pages);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	struct scatterlist *sgl;
	unsigned int i = 0;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
	if (ret < 0)
		goto err_alloc;

	for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
		sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
			     ubuf->offsets[i]);

	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err_map;
	return sg;

err_map:
	sg_free_table(sg);
err_alloc:
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

static void unpin_all_folios(struct udmabuf *ubuf)
{
	pgoff_t i;

	for (i = 0; i < ubuf->nr_pinned; ++i)
		unpin_folio(ubuf->pinned_folios[i]);

	kvfree(ubuf->pinned_folios);
}

static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
	ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios),
				      GFP_KERNEL);
	if (!ubuf->folios)
		return -ENOMEM;

	ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
	if (!ubuf->offsets)
		return -ENOMEM;

	ubuf->pinned_folios = kvmalloc_array(pgcnt,
					     sizeof(*ubuf->pinned_folios),
					     GFP_KERNEL);
	if (!ubuf->pinned_folios)
		return -ENOMEM;

	return 0;
}

static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
{
	unpin_all_folios(ubuf);
	kvfree(ubuf->offsets);
	kvfree(ubuf->folios);
}
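/*
 * Note on lifetime: ubuf->sg below is the sg_table lazily created by
 * begin_cpu_udmabuf() for CPU access. It is mapped against the udmabuf misc
 * device itself (ubuf->device->this_device), not against an importer, so
 * release_udmabuf() unmaps it against that same device before freeing the
 * buffer state.
 */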
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	deinit_udmabuf(ubuf);
	kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	/* Map on first CPU access; afterwards only a sync is needed. */
	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.vmap = vmap_udmabuf,
	.vunmap = vunmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};

#define SEALS_WANTED	(F_SEAL_SHRINK)
#define SEALS_DENIED	(F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)

static int check_memfd_seals(struct file *memfd)
{
	int seals;

	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
		return -EBADFD;

	seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
	if (seals == -EINVAL)
		return -EBADFD;

	if ((seals & SEALS_WANTED) != SEALS_WANTED ||
	    (seals & SEALS_DENIED) != 0)
		return -EINVAL;

	return 0;
}

static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
				      struct miscdevice *device)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	ubuf->device = device;
	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	return dma_buf_export(&exp_info);
}

static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
			       loff_t start, loff_t size, struct folio **folios)
{
	pgoff_t nr_pinned = ubuf->nr_pinned;
	pgoff_t upgcnt = ubuf->pagecount;
	u32 cur_folio, cur_pgcnt;
	pgoff_t pgoff, pgcnt;
	long nr_folios;
	loff_t end;

	pgcnt = size >> PAGE_SHIFT;
	end = start + (pgcnt << PAGE_SHIFT) - 1;
	nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
	if (nr_folios <= 0)
		return nr_folios ? nr_folios : -EINVAL;

	cur_pgcnt = 0;
	for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
		pgoff_t subpgoff = pgoff;
		size_t fsize = folio_size(folios[cur_folio]);

		ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];

		for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
			ubuf->folios[upgcnt] = folios[cur_folio];
			ubuf->offsets[upgcnt] = subpgoff;
			++upgcnt;

			if (++cur_pgcnt >= pgcnt)
				goto end;
		}

		/*
		 * In a given range, only the first subpage of the first folio
		 * has an offset, which is returned by memfd_pin_folios().
		 * The first subpages of the other folios in the range have an
		 * offset of 0.
		 */
		pgoff = 0;
	}
end:
	ubuf->pagecount = upgcnt;
	ubuf->nr_pinned = nr_pinned;
	return 0;
}
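/*
 * An illustrative sketch of preparing a memfd that will pass
 * check_memfd_seals() above (userspace, not part of this driver; error
 * handling omitted; hugetlbfs-backed memfds are also accepted):
 *
 *	int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);				// page-aligned size
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	// wanted seal
 *
 * F_SEAL_WRITE and F_SEAL_FUTURE_WRITE must not be set, and the offset/size
 * passed to the create ioctls must be PAGE_SIZE aligned.
 */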
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	unsigned long max_nr_folios = 0;
	struct folio **folios = NULL;
	pgoff_t pgcnt = 0, pglimit;
	struct udmabuf *ubuf;
	struct dma_buf *dmabuf;
	long ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* Validate each item's alignment and compute the total page count. */
	pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		pgoff_t subpgcnt;

		if (!PAGE_ALIGNED(list[i].offset))
			goto err_noinit;
		if (!PAGE_ALIGNED(list[i].size))
			goto err_noinit;

		subpgcnt = list[i].size >> PAGE_SHIFT;
		pgcnt += subpgcnt;
		if (pgcnt > pglimit)
			goto err_noinit;

		max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
	}

	if (!pgcnt)
		goto err_noinit;

	ret = init_udmabuf(ubuf, pgcnt);
	if (ret)
		goto err;

	folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
	if (!folios) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < head->count; i++) {
		struct file *memfd = fget(list[i].memfd);

		if (!memfd) {
			ret = -EBADFD;
			goto err;
		}

		/*
		 * Take the inode lock to protect against concurrent
		 * memfd_add_seals(), which takes this lock in write mode.
		 */
		inode_lock_shared(file_inode(memfd));
		ret = check_memfd_seals(memfd);
		if (ret)
			goto out_unlock;

		ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
					 list[i].size, folios);
out_unlock:
		inode_unlock_shared(file_inode(memfd));
		fput(memfd);
		if (ret)
			goto err;
	}

	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
	dmabuf = export_udmabuf(ubuf, device);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto err;
	}
	/*
	 * Ownership of ubuf is held by the dmabuf from here on.
	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both
	 * the dmabuf and the ubuf (through udmabuf_ops.release).
	 */

	ret = dma_buf_fd(dmabuf, flags);
	if (ret < 0)
		dma_buf_put(dmabuf);

	kvfree(folios);
	return ret;

err:
	deinit_udmabuf(ubuf);
err_noinit:
	kfree(ubuf);
	kvfree(folios);
	return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags = create.flags;
	head.count = 1;
	list.memfd = create.memfd;
	list.offset = create.offset;
	list.size = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("Create dma-bufs from sealed memfd regions");
MODULE_LICENSE("GPL v2");
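
/*
 * An end-to-end usage sketch (userspace, illustrative only; assumes a memfd
 * prepared as described before udmabuf_create() above):
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,		// page-aligned offset into the memfd
 *		.size   = size,		// page-aligned, within size_limit_mb
 *	};
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 *
 * On success, buffd is an ordinary dma-buf fd that can be handed to
 * importers or mapped with mmap() as sketched near udmabuf_vm_ops above.
 * UDMABUF_CREATE_LIST instead takes a struct udmabuf_create_list header
 * followed by head.count udmabuf_create_item entries, as read by
 * udmabuf_ioctl_create_list().
 */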