// SPDX-License-Identifier: GPL-2.0-only
/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */

#include <drm/drm_device.h>
#include <drm/drm_utils.h>
#include <drm/rocket_accel.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#include "rocket_drv.h"
#include "rocket_gem.h"

static void rocket_gem_bo_free(struct drm_gem_object *obj)
{
	struct rocket_gem_object *bo = to_rocket_bo(obj);
	struct rocket_file_priv *rocket_priv = bo->driver_priv;
	size_t unmapped;

	drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);

	/* Unmap the buffer from the NPU's address space before freeing it. */
	unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
	drm_WARN_ON(obj->dev, unmapped != bo->size);

	/* Release the VA range that was reserved for this BO. */
	mutex_lock(&rocket_priv->mm_lock);
	drm_mm_remove_node(&bo->mm);
	mutex_unlock(&rocket_priv->mm_lock);

	rocket_iommu_domain_put(bo->domain);
	bo->domain = NULL;

	drm_gem_shmem_free(&bo->base);
}

static const struct drm_gem_object_funcs rocket_gem_funcs = {
	.free = rocket_gem_bo_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
{
	struct rocket_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &rocket_gem_funcs;

	return &obj->base.base;
}

int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct rocket_file_priv *rocket_priv = file->driver_priv;
	struct drm_rocket_create_bo *args = data;
	struct drm_gem_shmem_object *shmem_obj;
	struct rocket_gem_object *rkt_obj;
	struct drm_gem_object *gem_obj;
	struct sg_table *sgt;
	int ret;

	shmem_obj = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);

	gem_obj = &shmem_obj->base;
	rkt_obj = to_rocket_bo(gem_obj);

	rkt_obj->driver_priv = rocket_priv;
	rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
	rkt_obj->size = args->size;
	rkt_obj->offset = 0;

	ret = drm_gem_handle_create(file, gem_obj, &args->handle);
	drm_gem_object_put(gem_obj);
	if (ret)
		goto err;

	sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err;
	}

	/* Reserve a VA range for this BO in the client's NPU address space. */
	mutex_lock(&rocket_priv->mm_lock);
	ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
					 rkt_obj->size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&rocket_priv->mm_lock);
	if (ret)
		goto err;

	ret = iommu_map_sgtable(rocket_priv->domain->domain,
				rkt_obj->mm.start,
				shmem_obj->sgt,
				IOMMU_READ | IOMMU_WRITE);
	if (ret < 0 || ret < args->size) {
		drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
			ret, args->size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	/* iommu_map_sgtable might have aligned the size */
	rkt_obj->size = ret;

	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	args->dma_address = rkt_obj->mm.start;

	return 0;

err_remove_node:
	mutex_lock(&rocket_priv->mm_lock);
	drm_mm_remove_node(&rkt_obj->mm);
	mutex_unlock(&rocket_priv->mm_lock);

err:
	drm_gem_shmem_object_free(gem_obj);

	return ret;
}

int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_rocket_prep_bo *args = data;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
	struct drm_gem_object *gem_obj;
	struct drm_gem_shmem_object *shmem_obj;
	long ret = 0;

	if (args->reserved != 0) {
		drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (!gem_obj)
		return -ENOENT;

	/* Wait for any pending NPU writes to this BO to finish. */
	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;
	else if (ret > 0)
		ret = 0;

	shmem_obj = &to_rocket_bo(gem_obj)->base;

	/* Make the device's writes visible to the CPU. */
	dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);

	drm_gem_object_put(gem_obj);

	return ret;
}

int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_rocket_fini_bo *args = data;
	struct drm_gem_shmem_object *shmem_obj;
	struct rocket_gem_object *rkt_obj;
	struct drm_gem_object *gem_obj;

	if (args->reserved != 0) {
		drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (!gem_obj)
		return -ENOENT;

	rkt_obj = to_rocket_bo(gem_obj);
	shmem_obj = &rkt_obj->base;

	/* Hand the BO back to the device: flush CPU writes out to memory. */
	dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);

	drm_gem_object_put(gem_obj);

	return 0;
}