1 // SPDX-License-Identifier: GPL-2.0 or MIT 2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ 3 /* Copyright 2023 Collabora ltd. */ 4 5 #include <linux/cleanup.h> 6 #include <linux/dma-buf.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/err.h> 9 #include <linux/slab.h> 10 11 #include <drm/panthor_drm.h> 12 13 #include "panthor_device.h" 14 #include "panthor_fw.h" 15 #include "panthor_gem.h" 16 #include "panthor_mmu.h" 17 18 #ifdef CONFIG_DEBUG_FS 19 static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, 20 struct panthor_gem_object *bo) 21 { 22 INIT_LIST_HEAD(&bo->debugfs.node); 23 24 bo->debugfs.creator.tgid = current->group_leader->pid; 25 get_task_comm(bo->debugfs.creator.process_name, current->group_leader); 26 27 mutex_lock(&ptdev->gems.lock); 28 list_add_tail(&bo->debugfs.node, &ptdev->gems.node); 29 mutex_unlock(&ptdev->gems.lock); 30 } 31 32 static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) 33 { 34 struct panthor_device *ptdev = container_of(bo->base.base.dev, 35 struct panthor_device, base); 36 37 if (list_empty(&bo->debugfs.node)) 38 return; 39 40 mutex_lock(&ptdev->gems.lock); 41 list_del_init(&bo->debugfs.node); 42 mutex_unlock(&ptdev->gems.lock); 43 } 44 45 static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) 46 { 47 bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; 48 } 49 #else 50 static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, 51 struct panthor_gem_object *bo) 52 {} 53 static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {} 54 static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {} 55 #endif 56 57 static void panthor_gem_free_object(struct drm_gem_object *obj) 58 { 59 struct panthor_gem_object *bo = to_panthor_bo(obj); 60 struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem; 61 62 panthor_gem_debugfs_bo_rm(bo); 63 64 /* 65 * Label might have been allocated with kstrdup_const(), 66 * we need to take that into account when freeing the memory 67 */ 68 kfree_const(bo->label.str); 69 70 mutex_destroy(&bo->label.lock); 71 72 drm_gem_free_mmap_offset(&bo->base.base); 73 mutex_destroy(&bo->gpuva_list_lock); 74 drm_gem_shmem_free(&bo->base); 75 drm_gem_object_put(vm_root_gem); 76 } 77 78 /** 79 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object 80 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction 81 * is skipped. 82 */ 83 void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) 84 { 85 struct panthor_vm *vm; 86 int ret; 87 88 if (IS_ERR_OR_NULL(bo)) 89 return; 90 91 vm = bo->vm; 92 panthor_kernel_bo_vunmap(bo); 93 94 if (drm_WARN_ON(bo->obj->dev, 95 to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) 96 goto out_free_bo; 97 98 ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); 99 if (ret) 100 goto out_free_bo; 101 102 panthor_vm_free_va(vm, &bo->va_node); 103 drm_gem_object_put(bo->obj); 104 105 out_free_bo: 106 panthor_vm_put(vm); 107 kfree(bo); 108 } 109 110 /** 111 * panthor_kernel_bo_create() - Create and map a GEM object to a VM 112 * @ptdev: Device. 113 * @vm: VM to map the GEM to. If NULL, the kernel object is not GPU mapped. 114 * @size: Size of the buffer object. 115 * @bo_flags: Combination of drm_panthor_bo_flags flags. 116 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those 117 * that are related to map operations). 118 * @gpu_va: GPU address assigned when mapping to the VM. 119 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be 120 * automatically allocated. 121 * @name: Descriptive label of the BO's contents 122 * 123 * Return: A valid pointer in case of success, an ERR_PTR() otherwise. 124 */ 125 struct panthor_kernel_bo * 126 panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, 127 size_t size, u32 bo_flags, u32 vm_map_flags, 128 u64 gpu_va, const char *name) 129 { 130 struct drm_gem_shmem_object *obj; 131 struct panthor_kernel_bo *kbo; 132 struct panthor_gem_object *bo; 133 u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL; 134 int ret; 135 136 if (drm_WARN_ON(&ptdev->base, !vm)) 137 return ERR_PTR(-EINVAL); 138 139 kbo = kzalloc(sizeof(*kbo), GFP_KERNEL); 140 if (!kbo) 141 return ERR_PTR(-ENOMEM); 142 143 obj = drm_gem_shmem_create(&ptdev->base, size); 144 if (IS_ERR(obj)) { 145 ret = PTR_ERR(obj); 146 goto err_free_bo; 147 } 148 149 bo = to_panthor_bo(&obj->base); 150 kbo->obj = &obj->base; 151 bo->flags = bo_flags; 152 153 if (vm == panthor_fw_vm(ptdev)) 154 debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED; 155 156 panthor_gem_kernel_bo_set_label(kbo, name); 157 panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags); 158 159 /* The system and GPU MMU page size might differ, which becomes a 160 * problem for FW sections that need to be mapped at explicit address 161 * since our PAGE_SIZE alignment might cover a VA range that's 162 * expected to be used for another section. 163 * Make sure we never map more than we need. 164 */ 165 size = ALIGN(size, panthor_vm_page_size(vm)); 166 ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node); 167 if (ret) 168 goto err_put_obj; 169 170 ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags); 171 if (ret) 172 goto err_free_va; 173 174 kbo->vm = panthor_vm_get(vm); 175 bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); 176 drm_gem_object_get(bo->exclusive_vm_root_gem); 177 bo->base.base.resv = bo->exclusive_vm_root_gem->resv; 178 return kbo; 179 180 err_free_va: 181 panthor_vm_free_va(vm, &kbo->va_node); 182 183 err_put_obj: 184 drm_gem_object_put(&obj->base); 185 186 err_free_bo: 187 kfree(kbo); 188 return ERR_PTR(ret); 189 } 190 191 static struct dma_buf * 192 panthor_gem_prime_export(struct drm_gem_object *obj, int flags) 193 { 194 /* We can't export GEMs that have an exclusive VM. */ 195 if (to_panthor_bo(obj)->exclusive_vm_root_gem) 196 return ERR_PTR(-EINVAL); 197 198 return drm_gem_prime_export(obj, flags); 199 } 200 201 static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj) 202 { 203 struct panthor_gem_object *bo = to_panthor_bo(obj); 204 enum drm_gem_object_status res = 0; 205 206 if (drm_gem_is_imported(&bo->base.base) || bo->base.pages) 207 res |= DRM_GEM_OBJECT_RESIDENT; 208 209 return res; 210 } 211 212 static const struct drm_gem_object_funcs panthor_gem_funcs = { 213 .free = panthor_gem_free_object, 214 .print_info = drm_gem_shmem_object_print_info, 215 .pin = drm_gem_shmem_object_pin, 216 .unpin = drm_gem_shmem_object_unpin, 217 .get_sg_table = drm_gem_shmem_object_get_sg_table, 218 .vmap = drm_gem_shmem_object_vmap, 219 .vunmap = drm_gem_shmem_object_vunmap, 220 .mmap = drm_gem_shmem_object_mmap, 221 .status = panthor_gem_status, 222 .export = panthor_gem_prime_export, 223 .vm_ops = &drm_gem_shmem_vm_ops, 224 }; 225 226 /** 227 * panthor_gem_create_object - Implementation of driver->gem_create_object. 228 * @ddev: DRM device 229 * @size: Size in bytes of the memory the object will reference 230 * 231 * This lets the GEM helpers allocate object structs for us, and keep 232 * our BO stats correct. 233 */ 234 struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size) 235 { 236 struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); 237 struct panthor_gem_object *obj; 238 239 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 240 if (!obj) 241 return ERR_PTR(-ENOMEM); 242 243 obj->base.base.funcs = &panthor_gem_funcs; 244 obj->base.map_wc = !ptdev->coherent; 245 mutex_init(&obj->gpuva_list_lock); 246 drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); 247 mutex_init(&obj->label.lock); 248 249 panthor_gem_debugfs_bo_add(ptdev, obj); 250 251 return &obj->base.base; 252 } 253 254 /** 255 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle. 256 * @file: DRM file. 257 * @ddev: DRM device. 258 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared. 259 * @size: Size of the GEM object to allocate. 260 * @flags: Combination of drm_panthor_bo_flags flags. 261 * @handle: Pointer holding the handle pointing to the new GEM object. 262 * 263 * Return: Zero on success 264 */ 265 int 266 panthor_gem_create_with_handle(struct drm_file *file, 267 struct drm_device *ddev, 268 struct panthor_vm *exclusive_vm, 269 u64 *size, u32 flags, u32 *handle) 270 { 271 int ret; 272 struct drm_gem_shmem_object *shmem; 273 struct panthor_gem_object *bo; 274 275 shmem = drm_gem_shmem_create(ddev, *size); 276 if (IS_ERR(shmem)) 277 return PTR_ERR(shmem); 278 279 bo = to_panthor_bo(&shmem->base); 280 bo->flags = flags; 281 282 if (exclusive_vm) { 283 bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm); 284 drm_gem_object_get(bo->exclusive_vm_root_gem); 285 bo->base.base.resv = bo->exclusive_vm_root_gem->resv; 286 } 287 288 /* 289 * Allocate an id of idr table where the obj is registered 290 * and handle has the id what user can see. 291 */ 292 ret = drm_gem_handle_create(file, &shmem->base, handle); 293 if (!ret) 294 *size = bo->base.base.size; 295 296 /* drop reference from allocate - handle holds it now. */ 297 drm_gem_object_put(&shmem->base); 298 299 /* 300 * No explicit flags are needed in the call below, since the 301 * function internally sets the INITIALIZED bit for us. 302 */ 303 panthor_gem_debugfs_set_usage_flags(bo, 0); 304 305 return ret; 306 } 307 308 void 309 panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label) 310 { 311 struct panthor_gem_object *bo = to_panthor_bo(obj); 312 const char *old_label; 313 314 scoped_guard(mutex, &bo->label.lock) { 315 old_label = bo->label.str; 316 bo->label.str = label; 317 } 318 319 kfree_const(old_label); 320 } 321 322 void 323 panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label) 324 { 325 const char *str; 326 327 /* We should never attempt labelling a UM-exposed GEM object */ 328 if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0)) 329 return; 330 331 if (!label) 332 return; 333 334 str = kstrdup_const(label, GFP_KERNEL); 335 if (!str) { 336 /* Failing to allocate memory for a label isn't a fatal condition */ 337 drm_warn(bo->obj->dev, "Not enough memory to allocate BO label"); 338 return; 339 } 340 341 panthor_gem_bo_set_label(bo->obj, str); 342 } 343 344 #ifdef CONFIG_DEBUG_FS 345 struct gem_size_totals { 346 size_t size; 347 size_t resident; 348 size_t reclaimable; 349 }; 350 351 static void panthor_gem_debugfs_print_flag_names(struct seq_file *m) 352 { 353 int len; 354 int i; 355 356 static const char * const gem_state_flags_names[] = { 357 [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported", 358 [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported", 359 }; 360 361 static const char * const gem_usage_flags_names[] = { 362 [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel", 363 [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped", 364 }; 365 366 seq_puts(m, "GEM state flags: "); 367 for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) { 368 if (!gem_state_flags_names[i]) 369 continue; 370 seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i], 371 (u32)BIT(i), (i < len - 1) ? ", " : "\n"); 372 } 373 374 seq_puts(m, "GEM usage flags: "); 375 for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) { 376 if (!gem_usage_flags_names[i]) 377 continue; 378 seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i], 379 (u32)BIT(i), (i < len - 1) ? ", " : "\n\n"); 380 } 381 } 382 383 static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo, 384 struct seq_file *m, 385 struct gem_size_totals *totals) 386 { 387 unsigned int refcount = kref_read(&bo->base.base.refcount); 388 char creator_info[32] = {}; 389 size_t resident_size; 390 u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; 391 u32 gem_state_flags = 0; 392 393 /* Skip BOs being destroyed. */ 394 if (!refcount) 395 return; 396 397 resident_size = bo->base.pages ? bo->base.base.size : 0; 398 399 snprintf(creator_info, sizeof(creator_info), 400 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid); 401 seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx", 402 creator_info, 403 bo->base.base.name, 404 refcount, 405 bo->base.base.size, 406 resident_size, 407 drm_vma_node_start(&bo->base.base.vma_node)); 408 409 if (bo->base.base.import_attach) 410 gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED; 411 if (bo->base.base.dma_buf) 412 gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED; 413 414 seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags); 415 416 scoped_guard(mutex, &bo->label.lock) { 417 seq_printf(m, "%s\n", bo->label.str ? : ""); 418 } 419 420 totals->size += bo->base.base.size; 421 totals->resident += resident_size; 422 if (bo->base.madv > 0) 423 totals->reclaimable += resident_size; 424 } 425 426 void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev, 427 struct seq_file *m) 428 { 429 struct gem_size_totals totals = {0}; 430 struct panthor_gem_object *bo; 431 432 panthor_gem_debugfs_print_flag_names(m); 433 434 seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n"); 435 seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n"); 436 437 scoped_guard(mutex, &ptdev->gems.lock) { 438 list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) { 439 if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED) 440 panthor_gem_debugfs_bo_print(bo, m, &totals); 441 } 442 } 443 444 seq_puts(m, "==============================================================================================================================================\n"); 445 seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n", 446 totals.size, totals.resident, totals.reclaimable); 447 } 448 #endif 449