// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"

/* Upper bound enforced on AMDXDNA_BO_CMD allocations */
#define XDNA_MAX_CMD_BO_SIZE	SZ_32K

MODULE_IMPORT_NS("DMA_BUF");

/*
 * amdxdna_gem_heap_alloc() - Carve device memory for a BO_DEV object out of
 * the client's dev heap.
 *
 * Validates the client's dev heap (it must exist, have a valid userptr, and
 * be large enough for @abo), then allocates a drm_mm node from the heap's
 * address space. On success the BO's dev_addr/userptr/kva are derived from
 * the node's offset within the heap, and a GEM reference is taken on the
 * heap object so it outlives this sub-allocation (dropped in
 * amdxdna_gem_heap_free()).
 *
 * All heap state is protected by client->mm_lock.
 *
 * Return: 0 on success, -EINVAL on an invalid heap/size, or the
 * drm_mm_insert_node_generic() error.
 */
static int
amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	struct amdxdna_gem_obj *heap;
	u64 offset;
	u32 align;
	int ret;

	mutex_lock(&client->mm_lock);

	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto unlock_out;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto unlock_out;
	}

	if (mem->size == 0 || mem->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
			 mem->size, heap->mem.size);
		ret = -EINVAL;
		goto unlock_out;
	}

	/* Alignment is at least one page, or larger if the device requires it */
	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto unlock_out;
	}

	/* CPU-visible addresses are the heap's mappings at the node's offset */
	mem->dev_addr = abo->mm_node.start;
	offset = mem->dev_addr - heap->mem.dev_addr;
	mem->userptr = heap->mem.userptr + offset;
	mem->kva = heap->mem.kva + offset;

	/* Keep the heap alive while this sub-allocation exists */
	drm_gem_object_get(to_gobj(heap));

unlock_out:
	mutex_unlock(&client->mm_lock);

	return ret;
}

/*
 * amdxdna_gem_destroy_obj() - Final teardown of an amdxdna GEM object
 * allocated with amdxdna_gem_create_obj().
 */
static void
amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
{
	mutex_destroy(&abo->lock);
	kfree(abo);
}

/*
 * amdxdna_gem_heap_free() - Release a BO_DEV sub-allocation made by
 * amdxdna_gem_heap_alloc().
 *
 * Removes the drm_mm node from the heap and drops the heap reference taken
 * at allocation time, under client->mm_lock.
 */
static void
amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_gem_obj *heap;

	mutex_lock(&abo->client->mm_lock);

	drm_mm_remove_node(&abo->mm_node);

	heap = abo->client->dev_heap;
	drm_gem_object_put(to_gobj(heap));

	mutex_unlock(&abo->client->mm_lock);
}

/*
 * amdxdna_hmm_invalidate() - mmu_interval_notifier invalidate callback.
 *
 * Marks the BO's mapping invalid under notifier_lock, bumps the interval
 * sequence number, and forwards the invalidation to the device-specific
 * hmm_invalidate op. If the range is being unmapped, defers the notifier
 * unregistration to the notifier workqueue (it cannot be removed from
 * within the callback itself).
 *
 * Return: false if the range is non-blockable (invalidation refused),
 * true otherwise.
 */
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
	struct amdxdna_gem_obj *abo = mapp->abo;
	struct amdxdna_dev *xdna;

	xdna = to_xdna_dev(to_gobj(abo)->dev);
	XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
		 mapp->vma->vm_start, mapp->vma->vm_end, abo->type);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&xdna->notifier_lock);
	abo->mem.map_invalid = true;
	mapp->invalid = true;
	mmu_interval_set_seq(&mapp->notifier, cur_seq);
	up_write(&xdna->notifier_lock);

	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);

	if (range->event == MMU_NOTIFY_UNMAP) {
		down_write(&xdna->notifier_lock);
		/* Queue at most once; the work drops the umap's reference */
		if (!mapp->unmapped) {
			queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
			mapp->unmapped = true;
		}
		up_write(&xdna->notifier_lock);
	}

	return true;
}
static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
	.invalidate = amdxdna_hmm_invalidate,
};

/*
 * amdxdna_hmm_unregister() - Queue unregistration of userptr mapping(s).
 *
 * @vma: when non-NULL, only the umap entry matching this VMA is queued;
 *       when NULL, every umap of the BO is queued (used at BO free time).
 *
 * Actual notifier removal happens in amdxdna_hmm_unreg_work() on the
 * notifier workqueue; here we only mark entries unmapped and queue them,
 * under the read side of notifier_lock.
 */
static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
				   struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct amdxdna_umap *mapp;

	down_read(&xdna->notifier_lock);
	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
		if (!vma || mapp->vma == vma) {
			if (!mapp->unmapped) {
				queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
				mapp->unmapped = true;
			}
			if (vma)
				break;
		}
	}
	up_read(&xdna->notifier_lock);
}

/*
 * amdxdna_umap_release() - kref release for an amdxdna_umap.
 *
 * Removes the interval notifier, clears the unevictable marking set at
 * register time for imported BOs, unlinks the entry from the BO's umap
 * list under notifier_lock, and frees the pfn array and the umap itself.
 */
static void amdxdna_umap_release(struct kref *ref)
{
	struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
	struct vm_area_struct *vma = mapp->vma;
	struct amdxdna_dev *xdna;

	mmu_interval_notifier_remove(&mapp->notifier);
	if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_clear_unevictable(vma->vm_file->f_mapping);

	xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
	down_write(&xdna->notifier_lock);
	list_del(&mapp->node);
	up_write(&xdna->notifier_lock);

	kvfree(mapp->range.hmm_pfns);
	kfree(mapp);
}

/* Drop a reference on a umap; the last put runs amdxdna_umap_release() */
void amdxdna_umap_put(struct amdxdna_umap *mapp)
{
	kref_put(&mapp->refcnt, amdxdna_umap_release);
}

/*
 * amdxdna_hmm_unreg_work() - Workqueue half of hmm unregistration.
 *
 * Runs on xdna->notifier_wq and drops the reference queued by
 * amdxdna_hmm_unregister()/amdxdna_hmm_invalidate().
 */
static void amdxdna_hmm_unreg_work(struct work_struct *work)
{
	struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
						 hmm_unreg_work);

	amdxdna_umap_put(mapp);
}

/*
 * amdxdna_hmm_register() - Track a userspace mapping of @abo with an
 * mmu_interval_notifier.
 *
 * No-op (returns 0) on devices without an hmm_invalidate op. Otherwise
 * allocates an amdxdna_umap covering [vma->vm_start, vma->vm_end),
 * inserts the notifier on current->mm (mmap_lock is held by the caller's
 * mmap path, hence the _locked variant), initializes the hmm_range used
 * for later faulting, and links the umap onto the BO's list under
 * notifier_lock. Imported BOs additionally mark the backing file mapping
 * unevictable while mapped.
 *
 * Return: 0 on success or a negative errno.
 */
static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long addr = vma->vm_start;
	struct amdxdna_umap *mapp;
	u32 nr_pages;
	int ret;

	if (!xdna->dev_info->ops->hmm_invalidate)
		return 0;

	mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
	if (!mapp)
		return -ENOMEM;

	/* One pfn slot per page spanned by the (page-aligned) range */
	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
	mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
					GFP_KERNEL);
	if (!mapp->range.hmm_pfns) {
		ret = -ENOMEM;
		goto free_map;
	}

	ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
						  current->mm,
						  addr,
						  len,
						  &amdxdna_hmm_ops);
	if (ret) {
		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
		goto free_pfns;
	}

	mapp->range.notifier = &mapp->notifier;
	mapp->range.start = vma->vm_start;
	mapp->range.end = vma->vm_end;
	mapp->range.default_flags = HMM_PFN_REQ_FAULT;
	mapp->vma = vma;
	mapp->abo = abo;
	kref_init(&mapp->refcnt);

	/* First mapping defines the BO's userptr */
	if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
		abo->mem.userptr = addr;
	INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
	if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_set_unevictable(vma->vm_file->f_mapping);

	down_write(&xdna->notifier_lock);
	list_add_tail(&mapp->node, &abo->mem.umap_list);
	up_write(&xdna->notifier_lock);

	return 0;

free_pfns:
	kvfree(mapp->range.hmm_pfns);
free_map:
	kfree(mapp);
	return ret;
}

/*
 * amdxdna_gem_dev_obj_free() - GEM free callback for AMDXDNA_BO_DEV objects.
 *
 * BO_DEV objects are sub-allocations of the client's dev heap, so freeing
 * returns the drm_mm node to the heap (and drops the heap reference) before
 * releasing the GEM object itself.
 */
static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	amdxdna_gem_heap_free(abo);
	drm_gem_object_release(gobj);
	amdxdna_gem_destroy_obj(abo);
}

/*
 * amdxdna_insert_pages() - Populate a VMA with the BO's backing pages.
 *
 * Native (shmem) BOs: run the normal shmem mmap, switch the VMA from
 * VM_PFNMAP to VM_MIXEDMAP, and insert the pages directly so no faults
 * are taken later.
 *
 * Imported BOs: delegate to the exporter via dma_buf_mmap() and then
 * pre-fault every page with handle_mm_fault(), since the exporter's pages
 * cannot be inserted directly. The GEM reference taken by
 * drm_gem_mmap_obj() is dropped here because the exporter's vm_ops now
 * own the VMA lifetime.
 *
 * Return: 0 on success or a negative errno; on failure after vm_ops are
 * installed, vm_ops->close() is invoked to undo the partial mmap.
 */
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long num_pages = vma_pages(vma);
	unsigned long offset = 0;
	int ret;

	if (!is_import_bo(abo)) {
		ret = drm_gem_shmem_mmap(&abo->base, vma);
		if (ret) {
			XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
			return ret;
		}

		/* The buffer is based on memory pages. Fix the flag. */
		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
		ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
				      &num_pages);
		if (ret) {
			XDNA_ERR(xdna, "Failed insert pages %d", ret);
			vma->vm_ops->close(vma);
			return ret;
		}

		return 0;
	}

	/* Imported BO: hand the VMA to the dma-buf exporter */
	vma->vm_private_data = NULL;
	vma->vm_ops = NULL;
	ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
	if (ret) {
		XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
		return ret;
	}

	do {
		vm_fault_t fault_ret;

		fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
					    FAULT_FLAG_WRITE, NULL);
		if (fault_ret & VM_FAULT_ERROR) {
			vma->vm_ops->close(vma);
			XDNA_ERR(xdna, "Fault in page failed");
			return -EFAULT;
		}

		offset += PAGE_SIZE;
	} while (--num_pages);

	/* Drop the reference drm_gem_mmap_obj() acquired. */
	drm_gem_object_put(to_gobj(abo));

	return 0;
}

/*
 * amdxdna_gem_obj_mmap() - GEM mmap callback.
 *
 * Registers the mapping with the hmm notifier machinery first, then
 * populates the VMA; the notifier registration is rolled back if page
 * insertion fails.
 */
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	int ret;

	ret = amdxdna_hmm_register(abo, vma);
	if (ret)
		return ret;

	ret = amdxdna_insert_pages(abo, vma);
	if (ret) {
		XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
		goto hmm_unreg;
	}

	XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
		 drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
		 vma->vm_start, gobj->size);
	return 0;

hmm_unreg:
	amdxdna_hmm_unregister(abo, vma);
	return ret;
}

/*
 * amdxdna_gem_dmabuf_mmap() - dma_buf_ops.mmap for BOs exported by this
 * driver.
 *
 * Mirrors the native path of amdxdna_insert_pages(): shmem mmap, flip
 * VM_PFNMAP to VM_MIXEDMAP, then insert the pages eagerly. A GEM
 * reference is held across the setup and dropped on failure (on success
 * the installed vm_ops keep the object referenced via vm_private_data).
 */
static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gobj = dma_buf->priv;
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	unsigned long num_pages = vma_pages(vma);
	int ret;

	vma->vm_ops = &drm_gem_shmem_vm_ops;
	vma->vm_private_data = gobj;

	drm_gem_object_get(gobj);
	ret = drm_gem_shmem_mmap(&abo->base, vma);
	if (ret)
		goto put_obj;

	/* The buffer is based on memory pages. Fix the flag. */
	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
			      &num_pages);
	if (ret)
		goto close_vma;

	return 0;

close_vma:
	vma->vm_ops->close(vma);
put_obj:
	drm_gem_object_put(gobj);
	return ret;
}

/* dma-buf ops: stock DRM GEM helpers except for the custom mmap */
static const struct dma_buf_ops amdxdna_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = amdxdna_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/*
 * amdxdna_gem_prime_export() - GEM export callback; wraps the BO in a
 * dma-buf that uses amdxdna_dmabuf_ops and shares the GEM object's
 * reservation object.
 */
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &amdxdna_dmabuf_ops;
	exp_info.size = gobj->size;
	exp_info.flags = flags;
	exp_info.priv = gobj;
	exp_info.resv = gobj->resv;

	return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}

/*
 * amdxdna_imported_obj_free() - Teardown for an imported (prime) BO:
 * unmap and detach the dma-buf attachment made in
 * amdxdna_gem_prime_import(), drop the dma-buf reference, then release
 * and free the GEM object.
 */
static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
	dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(abo->dma_buf, abo->attach);
	dma_buf_put(abo->dma_buf);
	drm_gem_object_release(to_gobj(abo));
	kfree(abo);
}

/*
 * amdxdna_gem_obj_free() - GEM free callback for shmem-backed BO types
 * (SHMEM, DEV_HEAP, CMD, and imports).
 *
 * Queues hmm unregistration for all remaining userspace mappings and
 * flushes the notifier workqueue so no unreg work touches the BO after
 * this point, then unpins, tears down the dev-heap allocator if this BO
 * was the heap, unmaps the kernel vmap, and hands off to the
 * import-specific or shmem free path.
 */
static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);

	amdxdna_hmm_unregister(abo, NULL);
	flush_workqueue(xdna->notifier_wq);

	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	if (abo->type == AMDXDNA_BO_DEV_HEAP)
		drm_mm_takedown(&abo->mm);

	drm_gem_vunmap(gobj, &map);
	mutex_destroy(&abo->lock);

	if (is_import_bo(abo)) {
		amdxdna_imported_obj_free(abo);
		return;
	}

	drm_gem_shmem_free(&abo->base);
}

/* BO_DEV objects are heap sub-allocations; only .free is needed */
static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
	.free = amdxdna_gem_dev_obj_free,
};

/* shmem-backed BOs use the stock shmem helpers plus custom mmap/export */
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
	.free = amdxdna_gem_obj_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = amdxdna_gem_obj_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
	.export = amdxdna_gem_prime_export,
};

/*
 * amdxdna_gem_create_obj() - Allocate and initialize a bare amdxdna GEM
 * object (no GEM init yet; addresses start out invalid until the caller
 * assigns them).
 *
 * Return: the new object or ERR_PTR(-ENOMEM).
 */
static struct amdxdna_gem_obj *
amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = kzalloc(sizeof(*abo), GFP_KERNEL);
	if (!abo)
		return ERR_PTR(-ENOMEM);

	abo->pinned = false;
	abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
	mutex_init(&abo->lock);

	abo->mem.userptr = AMDXDNA_INVALID_ADDR;
	abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
	abo->mem.size = size;
	INIT_LIST_HEAD(&abo->mem.umap_list);

	return abo;
}

/* For drm_driver->gem_create_object callback */
/*
 * amdxdna_gem_create_object_cb() - drm_driver->gem_create_object hook;
 * allocates an amdxdna GEM object and wires in the shmem funcs table.
 */
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_obj(dev, size);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;

	return to_gobj(abo);
}

/*
 * amdxdna_gem_prime_import() - Import a foreign dma-buf as an amdxdna BO.
 *
 * Attaches to the dma-buf, maps an sg table, and builds a shmem GEM
 * object from it; the attachment and dma-buf references are kept on the
 * BO and released in amdxdna_imported_obj_free(). All acquired resources
 * are unwound in reverse order on failure.
 *
 * Return: the imported GEM object or an ERR_PTR.
 */
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	struct sg_table *sgt;
	int ret;

	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put_buf;
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(gobj)) {
		ret = PTR_ERR(gobj);
		goto fail_unmap;
	}

	abo = to_xdna_obj(gobj);
	abo->attach = attach;
	abo->dma_buf = dma_buf;

	return gobj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
put_buf:
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

/*
 * amdxdna_drm_alloc_shmem() - Create a plain AMDXDNA_BO_SHMEM object
 * (cached mapping, map_wc disabled).
 */
static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
			struct amdxdna_drm_create_bo *args,
			struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;

	abo = to_xdna_obj(&shmem->base);
	abo->client = client;
	abo->type = AMDXDNA_BO_SHMEM;

	return abo;
}

/*
 * amdxdna_drm_create_dev_heap() - Create the per-client dev heap BO
 * (AMDXDNA_BO_DEV_HEAP) that backs all BO_DEV sub-allocations.
 *
 * Only one heap per client is allowed (-EBUSY otherwise). The heap's
 * device address space starts at dev_mem_base and is managed by a drm_mm;
 * the backing shmem object is vmapped so BO_DEV objects can derive their
 * kva from it. An extra GEM reference is held by client->dev_heap.
 *
 * Return: the heap BO or an ERR_PTR.
 */
static struct amdxdna_gem_obj *
amdxdna_drm_create_dev_heap(struct drm_device *dev,
			    struct amdxdna_drm_create_bo *args,
			    struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > xdna->dev_info->dev_mem_size) {
		XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
			 args->size, xdna->dev_info->dev_mem_size);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&client->mm_lock);
	if (client->dev_heap) {
		XDNA_DBG(client->xdna, "dev heap is already created");
		ret = -EBUSY;
		goto mm_unlock;
	}

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto mm_unlock;
	}

	shmem->map_wc = false;
	abo = to_xdna_obj(&shmem->base);
	abo->type = AMDXDNA_BO_DEV_HEAP;
	abo->client = client;
	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);

	ret = drm_gem_vmap(to_gobj(abo), &map);
	if (ret) {
		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
		goto release_obj;
	}
	abo->mem.kva = map.vaddr;

	client->dev_heap = abo;
	drm_gem_object_get(to_gobj(abo));
	mutex_unlock(&client->mm_lock);

	return abo;

release_obj:
	drm_gem_object_put(to_gobj(abo));
mm_unlock:
	mutex_unlock(&client->mm_lock);
	return ERR_PTR(ret);
}

/*
 * amdxdna_drm_alloc_dev_bo() - Create an AMDXDNA_BO_DEV object backed by
 * the client's dev heap.
 *
 * The object is a private (non-shmem) GEM object; its memory comes from
 * amdxdna_gem_heap_alloc(), which also references the heap BO.
 *
 * Return: the new BO or an ERR_PTR.
 */
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
			 struct amdxdna_drm_create_bo *args,
			 struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	size_t aligned_sz = PAGE_ALIGN(args->size);
	struct amdxdna_gem_obj *abo;
	int ret;

	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
	if (IS_ERR(abo))
		return abo;

	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
	abo->type = AMDXDNA_BO_DEV;
	abo->client = client;

	ret = amdxdna_gem_heap_alloc(abo);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		amdxdna_gem_destroy_obj(abo);
		return ERR_PTR(ret);
	}

	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

	return abo;
}

/*
 * amdxdna_drm_create_cmd_bo() - Create an AMDXDNA_BO_CMD object.
 *
 * Size is bounded between sizeof(struct amdxdna_cmd) and
 * XDNA_MAX_CMD_BO_SIZE. The BO is vmapped immediately so the driver can
 * access the command payload through mem.kva.
 *
 * Return: the new BO or an ERR_PTR.
 */
static struct amdxdna_gem_obj *
amdxdna_drm_create_cmd_bo(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args,
			  struct drm_file *filp)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > XDNA_MAX_CMD_BO_SIZE) {
		XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
		return ERR_PTR(-EINVAL);
	}

	if (args->size < sizeof(struct amdxdna_cmd)) {
		XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
		return ERR_PTR(-EINVAL);
	}

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;
	abo = to_xdna_obj(&shmem->base);

	abo->type = AMDXDNA_BO_CMD;
	abo->client = filp->driver_priv;

	ret = drm_gem_vmap(to_gobj(abo), &map);
	if (ret) {
		XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
		goto release_obj;
	}
	abo->mem.kva = map.vaddr;

	return abo;

release_obj:
	drm_gem_shmem_free(shmem);
	return ERR_PTR(ret);
}

/*
 * amdxdna_drm_create_bo_ioctl() - DRM_IOCTL_AMDXDNA_CREATE_BO handler.
 *
 * Dispatches on args->type to the type-specific creator, then publishes
 * the object to userspace via a GEM handle. The local reference is always
 * dropped afterwards; on success the handle keeps the object alive.
 *
 * Return: 0 on success or a negative errno.
 */
int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_create_bo *args = data;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->flags || args->vaddr || !args->size)
		return -EINVAL;

	XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
		 args->type, args->vaddr, args->size, args->flags);
	switch (args->type) {
	case AMDXDNA_BO_SHMEM:
		abo = amdxdna_drm_alloc_shmem(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV_HEAP:
		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV:
		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
		break;
	case AMDXDNA_BO_CMD:
		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
		break;
	default:
		return -EINVAL;
	}
	if (IS_ERR(abo))
		return PTR_ERR(abo);

	/* ready to publish object to userspace */
	ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
	if (ret) {
		XDNA_ERR(xdna, "Create handle failed");
		goto put_obj;
	}

	XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
		 args->handle, args->type, abo->mem.userptr,
		 abo->mem.dev_addr, abo->mem.size);
put_obj:
	/* Dereference object reference. Handle holds it now. */
	drm_gem_object_put(to_gobj(abo));
	return ret;
}

/*
 * amdxdna_gem_pin_nolock() - Pin a BO's backing pages; caller holds the
 * relevant abo->lock.
 *
 * BO_DEV objects have no pages of their own, so the pin is redirected to
 * the client's dev heap. Imported BOs are already pinned by the exporter
 * and return 0.
 */
int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	int ret;

	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return 0;

	ret = drm_gem_shmem_pin(&abo->base);

	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
	return ret;
}

/* Locked wrapper around amdxdna_gem_pin_nolock() */
int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
	int ret;

	mutex_lock(&abo->lock);
	ret = amdxdna_gem_pin_nolock(abo);
	mutex_unlock(&abo->lock);

	return ret;
}

/*
 * amdxdna_gem_unpin() - Undo amdxdna_gem_pin(). Like pin, BO_DEV
 * redirects to the dev heap and imported BOs are a no-op.
 * NOTE(review): the lock taken here is the (possibly redirected) heap
 * BO's lock, while amdxdna_gem_pin() locks the original BO before
 * redirecting — confirm this asymmetry is intentional.
 */
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return;

	mutex_lock(&abo->lock);
	drm_gem_shmem_unpin(&abo->base);
	mutex_unlock(&abo->lock);
}

/*
 * amdxdna_gem_get_obj() - Look up a BO by handle, optionally filtered by
 * type (AMDXDNA_BO_INVALID matches any type).
 *
 * Return: the referenced BO, or NULL if the handle is unknown or the
 * type does not match (the lookup reference is dropped in that case).
 * The caller owns the returned reference.
 */
struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
					    u32 bo_hdl, u8 bo_type)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(client->filp, bo_hdl);
	if (!gobj) {
		XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
		return NULL;
	}

	abo = to_xdna_obj(gobj);
	if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
		return abo;

	drm_gem_object_put(gobj);
	return NULL;
}

/* NOTE(review): function continues beyond the visible portion of this file */
int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_drm_get_bo_info *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret = 0;

	if (args->ext || args->ext_flags || args->pad)
		return -EINVAL;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
		return -ENOENT;
	}

	abo = to_xdna_obj(gobj);
	args->vaddr = abo->mem.userptr;
	args->xdna_addr = abo->mem.dev_addr;

	if (abo->type != AMDXDNA_BO_DEV)
		args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
	else
		args->map_offset = AMDXDNA_INVALID_ADDR;

	XDNA_DBG(xdna,
"BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx", 834ac49797cSLizhi Hou args->handle, args->map_offset, args->vaddr, args->xdna_addr); 835ac49797cSLizhi Hou 836ac49797cSLizhi Hou drm_gem_object_put(gobj); 837ac49797cSLizhi Hou return ret; 838ac49797cSLizhi Hou } 839ac49797cSLizhi Hou 840ac49797cSLizhi Hou /* 841ac49797cSLizhi Hou * The sync bo ioctl is to make sure the CPU cache is in sync with memory. 842ac49797cSLizhi Hou * This is required because NPU is not cache coherent device. CPU cache 843ac49797cSLizhi Hou * flushing/invalidation is expensive so it is best to handle this outside 844ac49797cSLizhi Hou * of the command submission path. This ioctl allows explicit cache 845ac49797cSLizhi Hou * flushing/invalidation outside of the critical path. 846ac49797cSLizhi Hou */ 847ac49797cSLizhi Hou int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, 848ac49797cSLizhi Hou void *data, struct drm_file *filp) 849ac49797cSLizhi Hou { 850ac49797cSLizhi Hou struct amdxdna_dev *xdna = to_xdna_dev(dev); 851ac49797cSLizhi Hou struct amdxdna_drm_sync_bo *args = data; 852ac49797cSLizhi Hou struct amdxdna_gem_obj *abo; 853ac49797cSLizhi Hou struct drm_gem_object *gobj; 854ac49797cSLizhi Hou int ret; 855ac49797cSLizhi Hou 856ac49797cSLizhi Hou gobj = drm_gem_object_lookup(filp, args->handle); 857ac49797cSLizhi Hou if (!gobj) { 858ac49797cSLizhi Hou XDNA_ERR(xdna, "Lookup GEM object failed"); 859ac49797cSLizhi Hou return -ENOENT; 860ac49797cSLizhi Hou } 861ac49797cSLizhi Hou abo = to_xdna_obj(gobj); 862ac49797cSLizhi Hou 863ac49797cSLizhi Hou ret = amdxdna_gem_pin(abo); 864ac49797cSLizhi Hou if (ret) { 865ac49797cSLizhi Hou XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret); 866ac49797cSLizhi Hou goto put_obj; 867ac49797cSLizhi Hou } 868ac49797cSLizhi Hou 869e486147cSLizhi Hou if (is_import_bo(abo)) 870e486147cSLizhi Hou drm_clflush_sg(abo->base.sgt); 871*e252e3f3SLizhi Hou else if (abo->mem.kva) 872*e252e3f3SLizhi Hou drm_clflush_virt_range(abo->mem.kva + 
args->offset, args->size); 873*e252e3f3SLizhi Hou else if (abo->base.pages) 874ac49797cSLizhi Hou drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT); 875*e252e3f3SLizhi Hou else 876*e252e3f3SLizhi Hou drm_WARN(&xdna->ddev, 1, "Can not get flush memory"); 877ac49797cSLizhi Hou 878ac49797cSLizhi Hou amdxdna_gem_unpin(abo); 879ac49797cSLizhi Hou 880ac49797cSLizhi Hou XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n", 881ac49797cSLizhi Hou args->handle, args->offset, args->size); 882ac49797cSLizhi Hou 883ac49797cSLizhi Hou put_obj: 884ac49797cSLizhi Hou drm_gem_object_put(gobj); 885ac49797cSLizhi Hou return ret; 886ac49797cSLizhi Hou } 887