// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 */

#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drm_print.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Report the BO's memory status (for fdinfo/debug accounting): the BO
 * counts as resident whenever its shmem backing pages are attached.
 */
static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj)
{
	struct v3d_bo *bo = to_v3d_bo(obj);
	enum drm_gem_object_status res = 0;

	if (bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	return res;
}

/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.  Undoes everything v3d_bo_create_finish() set up: kernel
 * vmap (if any), the GPU page-table entries, the allocation stats,
 * and the BO's reserved range in the GPU address space.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	if (bo->vaddr)
		v3d_put_bo_vaddr(bo);

	v3d_mmu_remove_ptes(bo);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> V3D_MMU_PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	/* mm_lock protects the drm_mm address-space allocator; it is a
	 * spinlock because it is also taken from job-submission paths.
	 */
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	/* GPU execution may have dirtied any pages in the BO. */
	bo->base.pages_mark_dirty_on_put = true;

	drm_gem_shmem_free(&bo->base);
}

static const struct drm_gem_object_funcs v3d_gem_funcs = {
	.free = v3d_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = v3d_gem_status,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/* gem_create_object function for allocating a BO struct and doing
 * early setup.  Returns the embedded GEM object; the shmem helpers
 * finish the allocation.  BOs are mapped write-combined (map_wc).
 */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
	struct v3d_bo *bo;
	struct drm_gem_object *obj;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);
	obj = &bo->base.base;

	obj->funcs = &v3d_gem_funcs;
	bo->base.map_wc = true;
	INIT_LIST_HEAD(&bo->unref_head);

	return &bo->base.base;
}

/* Setup common to natively-created and prime-imported BOs: pin the
 * backing pages, reserve a range in the GPU address space, bump the
 * stats, and write the GPU page-table entries.
 */
static int
v3d_bo_create_finish(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);
	struct sg_table *sgt;
	u64 align;
	int ret;

	/* So far we pin the BO in the MMU for its lifetime, so use
	 * shmem's helper for getting a lifetime sgt.
	 */
	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* Pick a GPU-address alignment matching the BO's size, but
	 * only when backed by the driver's own gemfs mount —
	 * presumably so that huge-page-backed BOs can be mapped with
	 * large MMU pages (NOTE(review): confirm against the gemfs
	 * setup in v3d_gemfs.c).  Without gemfs, plain 4K.
	 */
	if (!v3d->gemfs)
		align = SZ_4K;
	else if (obj->size >= SZ_1M)
		align = SZ_1M;
	else if (obj->size >= SZ_64K)
		align = SZ_64K;
	else
		align = SZ_4K;

	spin_lock(&v3d->mm_lock);
	/* Allocate the object's space in the GPU's page tables.
	 * Inserting PTEs will happen later, but the offset is for the
	 * lifetime of the BO.
	 */
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> V3D_MMU_PAGE_SHIFT,
					 align >> V3D_MMU_PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		return ret;

	/* Track stats for /debug/dri/n/bo_stats. */
	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> V3D_MMU_PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	v3d_mmu_insert_ptes(bo);

	return 0;
}

/* Allocate a new BO of at least @unaligned_size bytes from the
 * driver's shmem mount and finish its GPU-side setup.  Returns the BO
 * or an ERR_PTR.  @file_priv is currently unused here.
 */
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct drm_gem_shmem_object *shmem_obj;
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_bo *bo;
	int ret;

	shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size,
						  v3d->gemfs);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);
	bo = to_v3d_bo(&shmem_obj->base);
	bo->vaddr = NULL;

	ret = v3d_bo_create_finish(&shmem_obj->base);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	drm_gem_shmem_free(shmem_obj);
	return ERR_PTR(ret);
}

/* dma-buf import: wrap the imported sg_table in a shmem GEM object,
 * then give it a GPU address range and PTEs like a native BO.
 */
struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	ret = v3d_bo_create_finish(obj);
	if (ret) {
		drm_gem_shmem_free(&to_v3d_bo(obj)->base);
		return ERR_PTR(ret);
	}

	return obj;
}

/* Map the whole BO into kernel address space, write-combined, at
 * bo->vaddr.  NOTE(review): the vmap() result is not checked here —
 * callers appear responsible for handling a NULL bo->vaddr.  Pair
 * with v3d_put_bo_vaddr().
 */
void v3d_get_bo_vaddr(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *obj = &bo->base;

	bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
			 pgprot_writecombine(PAGE_KERNEL));
}

/* Drop the kernel mapping created by v3d_get_bo_vaddr(). */
void v3d_put_bo_vaddr(struct v3d_bo *bo)
{
	vunmap(bo->vaddr);
	bo->vaddr = NULL;
}

/* DRM_IOCTL_V3D_CREATE_BO: allocate a BO and return both a GEM handle
 * and the BO's offset in the GPU address space.
 */
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_v3d_create_bo *args = data;
	struct v3d_bo *bo = NULL;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	/* Drop the creation reference; the handle (or nothing, on
	 * error) now owns the BO.
	 */
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/* DRM_IOCTL_V3D_MMAP_BO: return the fake mmap offset userspace passes
 * to mmap() on the DRM fd to map this BO.
 */
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_v3d_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	drm_gem_object_put(gem_obj);

	return 0;
}

/* DRM_IOCTL_V3D_GET_BO_OFFSET: return the BO's offset in the GPU
 * address space (its drm_mm node start, in bytes).
 */
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_v3d_get_bo_offset *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_v3d_bo(gem_obj);

	args->offset = bo->node.start << V3D_MMU_PAGE_SHIFT;

	drm_gem_object_put(gem_obj);
	return 0;
}

/* DRM_IOCTL_V3D_WAIT_BO: wait (up to args->timeout_ns) for all
 * fences in the BO's reservation object to signal.
 */
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffy/scheduler precision?  Report
	 * -EAGAIN instead of -ETIME so userspace retries with the
	 * remaining ns rather than treating it as a final timeout.
	 */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}