// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

#define XDNA_MAX_CMD_BO_SIZE	SZ_32K

MODULE_IMPORT_NS("DMA_BUF");

static int
amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	struct amdxdna_gem_obj *heap;
	u64 offset;
	u32 align;
	int ret;

	mutex_lock(&client->mm_lock);

	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto unlock_out;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto unlock_out;
	}

	if (mem->size == 0 || mem->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
			 mem->size, heap->mem.size);
		ret = -EINVAL;
		goto unlock_out;
	}

	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto unlock_out;
	}

	mem->dev_addr = abo->mm_node.start;
	offset = mem->dev_addr - heap->mem.dev_addr;
	mem->userptr = heap->mem.userptr + offset;
	mem->kva = heap->mem.kva + offset;

	drm_gem_object_get(to_gobj(heap));

unlock_out:
	mutex_unlock(&client->mm_lock);

	return ret;
}

static void
amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
{
	mutex_destroy(&abo->lock);
	kfree(abo);
}

static void
amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_gem_obj *heap;

	mutex_lock(&abo->client->mm_lock);

	drm_mm_remove_node(&abo->mm_node);

	heap = abo->client->dev_heap;
	drm_gem_object_put(to_gobj(heap));

	mutex_unlock(&abo->client->mm_lock);
}

static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
	struct amdxdna_gem_obj *abo = mapp->abo;
	struct amdxdna_dev *xdna;

	xdna = to_xdna_dev(to_gobj(abo)->dev);
	XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
		 mapp->vma->vm_start, mapp->vma->vm_end, abo->type);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&xdna->notifier_lock);
	abo->mem.map_invalid = true;
	mapp->invalid = true;
	mmu_interval_set_seq(&mapp->notifier, cur_seq);
	up_write(&xdna->notifier_lock);

	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);

	if (range->event == MMU_NOTIFY_UNMAP) {
		down_write(&xdna->notifier_lock);
		if (!mapp->unmapped) {
			queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
			mapp->unmapped = true;
		}
		up_write(&xdna->notifier_lock);
	}

	return true;
}

static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
	.invalidate = amdxdna_hmm_invalidate,
};

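/*
 * Queue deferred notifier teardown for the userptr mappings of @abo. With a
 * non-NULL @vma only the matching mapping is queued; with a NULL @vma every
 * mapping on the BO is queued. The actual unregistration runs from the
 * notifier workqueue, so mmu_interval_notifier_remove() is never called from
 * the invalidate callback path.
 */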
static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
				   struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct amdxdna_umap *mapp;

	down_read(&xdna->notifier_lock);
	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
		if (!vma || mapp->vma == vma) {
			if (!mapp->unmapped) {
				queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
				mapp->unmapped = true;
			}
			if (vma)
				break;
		}
	}
	up_read(&xdna->notifier_lock);
}

static void amdxdna_umap_release(struct kref *ref)
{
	struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
	struct vm_area_struct *vma = mapp->vma;
	struct amdxdna_dev *xdna;

	mmu_interval_notifier_remove(&mapp->notifier);
	if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_clear_unevictable(vma->vm_file->f_mapping);

	xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
	down_write(&xdna->notifier_lock);
	list_del(&mapp->node);
	up_write(&xdna->notifier_lock);

	kvfree(mapp->range.hmm_pfns);
	kfree(mapp);
}

void amdxdna_umap_put(struct amdxdna_umap *mapp)
{
	kref_put(&mapp->refcnt, amdxdna_umap_release);
}

static void amdxdna_hmm_unreg_work(struct work_struct *work)
{
	struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
						 hmm_unreg_work);

	amdxdna_umap_put(mapp);
}

static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long addr = vma->vm_start;
	struct amdxdna_umap *mapp;
	u32 nr_pages;
	int ret;

	if (!xdna->dev_info->ops->hmm_invalidate)
		return 0;

	mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
	if (!mapp)
		return -ENOMEM;

	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
	mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
					GFP_KERNEL);
	if (!mapp->range.hmm_pfns) {
		ret = -ENOMEM;
		goto free_map;
	}

	ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
						  current->mm,
						  addr,
						  len,
						  &amdxdna_hmm_ops);
	if (ret) {
		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
		goto free_pfns;
	}

	mapp->range.notifier = &mapp->notifier;
	mapp->range.start = vma->vm_start;
	mapp->range.end = vma->vm_end;
	mapp->range.default_flags = HMM_PFN_REQ_FAULT;
	mapp->vma = vma;
	mapp->abo = abo;
	kref_init(&mapp->refcnt);

	if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
		abo->mem.userptr = addr;
	INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
	if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_set_unevictable(vma->vm_file->f_mapping);

	down_write(&xdna->notifier_lock);
	list_add_tail(&mapp->node, &abo->mem.umap_list);
	up_write(&xdna->notifier_lock);

	return 0;

free_pfns:
	kvfree(mapp->range.hmm_pfns);
free_map:
	kfree(mapp);
	return ret;
}

static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	amdxdna_gem_heap_free(abo);
	drm_gem_object_release(gobj);
	amdxdna_gem_destroy_obj(abo);
}

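/*
 * Populate the VMA's page tables up front: shmem-backed BOs get their pages
 * inserted with vm_insert_pages() after drm_gem_shmem_mmap(), while imported
 * dma-bufs are mapped with dma_buf_mmap() and then faulted in page by page.
 */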
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long num_pages = vma_pages(vma);
	unsigned long offset = 0;
	int ret;

	if (!is_import_bo(abo)) {
		ret = drm_gem_shmem_mmap(&abo->base, vma);
		if (ret) {
			XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
			return ret;
		}

		/* The buffer is based on memory pages. Fix the flag. */
		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
		ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
				      &num_pages);
		if (ret) {
			XDNA_ERR(xdna, "Failed insert pages %d", ret);
			vma->vm_ops->close(vma);
			return ret;
		}

		return 0;
	}

	vma->vm_private_data = NULL;
	vma->vm_ops = NULL;
	ret = dma_buf_mmap(abo->dma_buf, vma, 0);
	if (ret) {
		XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
		return ret;
	}

	do {
		vm_fault_t fault_ret;

		fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
					    FAULT_FLAG_WRITE, NULL);
		if (fault_ret & VM_FAULT_ERROR) {
			vma->vm_ops->close(vma);
			XDNA_ERR(xdna, "Fault in page failed");
			return -EFAULT;
		}

		offset += PAGE_SIZE;
	} while (--num_pages);

	/* Drop the reference drm_gem_mmap_obj() acquired. */
	drm_gem_object_put(to_gobj(abo));

	return 0;
}

static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	int ret;

	ret = amdxdna_hmm_register(abo, vma);
	if (ret)
		return ret;

	ret = amdxdna_insert_pages(abo, vma);
	if (ret) {
		XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
		goto hmm_unreg;
	}

	XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
		 drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
		 vma->vm_start, gobj->size);
	return 0;

hmm_unreg:
	amdxdna_hmm_unregister(abo, vma);
	return ret;
}

static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gobj = dma_buf->priv;
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	unsigned long num_pages = vma_pages(vma);
	int ret;

	vma->vm_ops = &drm_gem_shmem_vm_ops;
	vma->vm_private_data = gobj;

	drm_gem_object_get(gobj);
	ret = drm_gem_shmem_mmap(&abo->base, vma);
	if (ret)
		goto put_obj;

	/* The buffer is based on memory pages. Fix the flag. */
	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
			      &num_pages);
	if (ret)
		goto close_vma;

	return 0;

close_vma:
	vma->vm_ops->close(vma);
put_obj:
	drm_gem_object_put(gobj);
	return ret;
}

static const struct dma_buf_ops amdxdna_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = amdxdna_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

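/*
 * Map the BO into kernel virtual address space. Imported buffers go through
 * the dma-buf vmap interface, everything else through drm_gem_vmap();
 * *vaddr receives the kernel address on success.
 */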
static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	int ret;

	if (is_import_bo(abo))
		ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
	else
		ret = drm_gem_vmap(to_gobj(abo), &map);

	*vaddr = map.vaddr;
	return ret;
}

static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
{
	struct iosys_map map;

	if (!abo->mem.kva)
		return;

	iosys_map_set_vaddr(&map, abo->mem.kva);

	if (is_import_bo(abo))
		dma_buf_vunmap_unlocked(abo->dma_buf, &map);
	else
		drm_gem_vunmap(to_gobj(abo), &map);
}

static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (abo->dma_buf) {
		get_dma_buf(abo->dma_buf);
		return abo->dma_buf;
	}

	exp_info.ops = &amdxdna_dmabuf_ops;
	exp_info.size = gobj->size;
	exp_info.flags = flags;
	exp_info.priv = gobj;
	exp_info.resv = gobj->resv;

	return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}

static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
	dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(abo->dma_buf, abo->attach);
	dma_buf_put(abo->dma_buf);
	drm_gem_object_release(to_gobj(abo));
	kfree(abo);
}

static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);

	amdxdna_hmm_unregister(abo, NULL);
	flush_workqueue(xdna->notifier_wq);

	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	if (abo->type == AMDXDNA_BO_DEV_HEAP)
		drm_mm_takedown(&abo->mm);

	amdxdna_gem_obj_vunmap(abo);
	mutex_destroy(&abo->lock);

	if (is_import_bo(abo)) {
		amdxdna_imported_obj_free(abo);
		return;
	}

	drm_gem_shmem_free(&abo->base);
}

static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
	.free = amdxdna_gem_dev_obj_free,
};

static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
	.free = amdxdna_gem_obj_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = amdxdna_gem_obj_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
	.export = amdxdna_gem_prime_export,
};

static struct amdxdna_gem_obj *
amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = kzalloc(sizeof(*abo), GFP_KERNEL);
	if (!abo)
		return ERR_PTR(-ENOMEM);

	abo->pinned = false;
	abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
	mutex_init(&abo->lock);

	abo->mem.userptr = AMDXDNA_INVALID_ADDR;
	abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
	abo->mem.size = size;
	INIT_LIST_HEAD(&abo->mem.umap_list);

	return abo;
}

/* For drm_driver->gem_create_object callback */
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_obj(dev, size);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;

	return to_gobj(abo);
}

static struct amdxdna_gem_obj *
amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);

	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;
	return to_xdna_obj(&shmem->base);
}

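/*
 * Create a BO from the user-supplied VA table: either wrap the listed ranges
 * in a ubuf dma-buf (DMA-mapped for command BOs) or, when the table is
 * empty, take the dma-buf behind the given fd. The result is then imported
 * like any other dma-buf.
 */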
static struct amdxdna_gem_obj *
amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	enum amdxdna_ubuf_flag flags = 0;
	struct amdxdna_drm_va_tbl va_tbl;
	struct drm_gem_object *gobj;
	struct dma_buf *dma_buf;

	if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
		XDNA_DBG(xdna, "Access va table failed");
		return ERR_PTR(-EINVAL);
	}

	if (va_tbl.num_entries) {
		if (args->type == AMDXDNA_BO_CMD)
			flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;

		dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
					   u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
	} else {
		dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
	}

	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gobj = amdxdna_gem_prime_import(dev, dma_buf);
	if (IS_ERR(gobj)) {
		dma_buf_put(dma_buf);
		return ERR_CAST(gobj);
	}

	dma_buf_put(dma_buf);

	return to_xdna_obj(gobj);
}

static struct amdxdna_gem_obj *
amdxdna_gem_create_object(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args)
{
	size_t aligned_sz = PAGE_ALIGN(args->size);

	if (args->vaddr)
		return amdxdna_gem_create_ubuf_object(dev, args);

	return amdxdna_gem_create_shmem_object(dev, aligned_sz);
}

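/*
 * Import a dma-buf: attach to it, map its sg table eagerly and build a shmem
 * GEM wrapper around the table. The returned object keeps references on both
 * the dma-buf and the attachment until it is freed.
 */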
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	struct sg_table *sgt;
	int ret;

	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put_buf;
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(gobj)) {
		ret = PTR_ERR(gobj);
		goto fail_unmap;
	}

	abo = to_xdna_obj(gobj);
	abo->attach = attach;
	abo->dma_buf = dma_buf;

	return gobj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
put_buf:
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
			struct amdxdna_drm_create_bo *args,
			struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	abo->client = client;
	abo->type = AMDXDNA_BO_SHMEM;

	return abo;
}

static struct amdxdna_gem_obj *
amdxdna_drm_create_dev_heap(struct drm_device *dev,
			    struct amdxdna_drm_create_bo *args,
			    struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > xdna->dev_info->dev_mem_size) {
		XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
			 args->size, xdna->dev_info->dev_mem_size);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&client->mm_lock);
	if (client->dev_heap) {
		XDNA_DBG(client->xdna, "dev heap is already created");
		ret = -EBUSY;
		goto mm_unlock;
	}

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo)) {
		ret = PTR_ERR(abo);
		goto mm_unlock;
	}

	abo->type = AMDXDNA_BO_DEV_HEAP;
	abo->client = client;
	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);

	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
	if (ret) {
		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
		goto release_obj;
	}

	client->dev_heap = abo;
	drm_gem_object_get(to_gobj(abo));
	mutex_unlock(&client->mm_lock);

	return abo;

release_obj:
	drm_gem_object_put(to_gobj(abo));
mm_unlock:
	mutex_unlock(&client->mm_lock);
	return ERR_PTR(ret);
}

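/*
 * Allocate an AMDXDNA_BO_DEV object. It has no backing storage of its own;
 * its device address, userptr and kernel mapping are all carved out of the
 * client's device heap by amdxdna_gem_heap_alloc().
 */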
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
			 struct amdxdna_drm_create_bo *args,
			 struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	size_t aligned_sz = PAGE_ALIGN(args->size);
	struct amdxdna_gem_obj *abo;
	int ret;

	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
	if (IS_ERR(abo))
		return abo;

	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
	abo->type = AMDXDNA_BO_DEV;
	abo->client = client;

	ret = amdxdna_gem_heap_alloc(abo);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		amdxdna_gem_destroy_obj(abo);
		return ERR_PTR(ret);
	}

	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

	return abo;
}

static struct amdxdna_gem_obj *
amdxdna_drm_create_cmd_bo(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args,
			  struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > XDNA_MAX_CMD_BO_SIZE) {
		XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
		return ERR_PTR(-EINVAL);
	}

	if (args->size < sizeof(struct amdxdna_cmd)) {
		XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
		return ERR_PTR(-EINVAL);
	}

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	abo->type = AMDXDNA_BO_CMD;
	abo->client = filp->driver_priv;

	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
	if (ret) {
		XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
		goto release_obj;
	}

	return abo;

release_obj:
	drm_gem_object_put(to_gobj(abo));
	return ERR_PTR(ret);
}

int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_create_bo *args = data;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->flags)
		return -EINVAL;

	XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
		 args->type, args->vaddr, args->size, args->flags);
	switch (args->type) {
	case AMDXDNA_BO_SHMEM:
		abo = amdxdna_drm_alloc_shmem(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV_HEAP:
		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV:
		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
		break;
	case AMDXDNA_BO_CMD:
		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
		break;
	default:
		return -EINVAL;
	}
	if (IS_ERR(abo))
		return PTR_ERR(abo);

	/* ready to publish object to userspace */
	ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
	if (ret) {
		XDNA_ERR(xdna, "Create handle failed");
		goto put_obj;
	}

	XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
		 args->handle, args->type, abo->mem.userptr,
		 abo->mem.dev_addr, abo->mem.size);
put_obj:
	/* Dereference object reference. Handle holds it now. */
	drm_gem_object_put(to_gobj(abo));
	return ret;
}

int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	int ret;

	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return 0;

	ret = drm_gem_shmem_pin(&abo->base);

	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
	return ret;
}

int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
	int ret;

	mutex_lock(&abo->lock);
	ret = amdxdna_gem_pin_nolock(abo);
	mutex_unlock(&abo->lock);

	return ret;
}

void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return;

	mutex_lock(&abo->lock);
	drm_gem_shmem_unpin(&abo->base);
	mutex_unlock(&abo->lock);
}

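/*
 * Look up @bo_hdl in the client's handle table and return the BO with a GEM
 * reference held, but only if its type matches @bo_type or @bo_type is
 * AMDXDNA_BO_INVALID; otherwise drop the reference and return NULL.
 */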
struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
					    u32 bo_hdl, u8 bo_type)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(client->filp, bo_hdl);
	if (!gobj) {
		XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
		return NULL;
	}

	abo = to_xdna_obj(gobj);
	if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
		return abo;

	drm_gem_object_put(gobj);
	return NULL;
}

int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_drm_get_bo_info *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret = 0;

	if (args->ext || args->ext_flags || args->pad)
		return -EINVAL;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
		return -ENOENT;
	}

	abo = to_xdna_obj(gobj);
	args->vaddr = abo->mem.userptr;
	args->xdna_addr = abo->mem.dev_addr;

	if (abo->type != AMDXDNA_BO_DEV)
		args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
	else
		args->map_offset = AMDXDNA_INVALID_ADDR;

	XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
		 args->handle, args->map_offset, args->vaddr, args->xdna_addr);

	drm_gem_object_put(gobj);
	return ret;
}

/*
 * The sync bo ioctl makes sure the CPU cache is in sync with memory. This is
 * required because the NPU is not a cache-coherent device. CPU cache
 * flushing/invalidation is expensive, so it is best handled outside of the
 * command submission path. This ioctl allows explicit cache
 * flushing/invalidation outside of that critical path.
 */
int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_sync_bo *args = data;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_ERR(xdna, "Lookup GEM object failed");
		return -ENOENT;
	}
	abo = to_xdna_obj(gobj);

	ret = amdxdna_gem_pin(abo);
	if (ret) {
		XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
		goto put_obj;
	}

	if (is_import_bo(abo))
		drm_clflush_sg(abo->base.sgt);
	else if (abo->mem.kva)
		drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
	else if (abo->base.pages)
		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
	else
		drm_WARN(&xdna->ddev, 1, "Can not get flush memory");

	amdxdna_gem_unpin(abo);

	XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
		 args->handle, args->offset, args->size);

	if (args->direction == SYNC_DIRECT_FROM_DEVICE)
		ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);

put_obj:
	drm_gem_object_put(gobj);
	return ret;
}