// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include <linux/iosys-map.h>
#include <linux/vmalloc.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"

#define XDNA_MAX_CMD_BO_SIZE	SZ_32K

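/*
 * Carve a region for a device BO out of the parent heap's drm_mm range
 * allocator. The BO's device address, userptr and backing pages are all
 * derived from the heap at the resulting offset; with use_vmap the pages
 * are also mapped into kernel space. Called with client->mm_lock held.
 */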
static int
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	u64 offset;
	u32 align;
	int ret;

	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		return ret;
	}

	mem->dev_addr = abo->mm_node.start;
	offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
	mem->userptr = abo->dev_heap->mem.userptr + offset;
	mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
	mem->nr_pages = mem->size >> PAGE_SHIFT;

	if (use_vmap) {
		mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
		if (!mem->kva) {
			XDNA_ERR(xdna, "Failed to vmap");
			drm_mm_remove_node(&abo->mm_node);
			return -EFAULT;
		}
	}

	return 0;
}

static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	if (abo->type == AMDXDNA_BO_DEV) {
		mutex_lock(&abo->client->mm_lock);
		drm_mm_remove_node(&abo->mm_node);
		mutex_unlock(&abo->client->mm_lock);

		vunmap(abo->mem.kva);
		drm_gem_object_put(to_gobj(abo->dev_heap));
		drm_gem_object_release(gobj);
		mutex_destroy(&abo->lock);
		kfree(abo);
		return;
	}

	if (abo->type == AMDXDNA_BO_DEV_HEAP)
		drm_mm_takedown(&abo->mm);

	drm_gem_vunmap_unlocked(gobj, &map);
	mutex_destroy(&abo->lock);
	drm_gem_shmem_free(&abo->base);
}

static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
	.free = amdxdna_gem_obj_free,
};

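/*
 * MMU interval notifier callback, invoked when the CPU mapping backing a
 * userptr range changes. Forwards the invalidation to the device-specific
 * hmm_invalidate handler.
 */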
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
						   mem.notifier);
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);

	XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
		 abo->mem.userptr, abo->mem.size, abo->type);

	if (!mmu_notifier_range_blockable(range))
		return false;

	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);

	return true;
}

static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
	.invalidate = amdxdna_hmm_invalidate,
};

static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);

	if (!xdna->dev_info->ops->hmm_invalidate)
		return;

	mmu_interval_notifier_remove(&abo->mem.notifier);
	kvfree(abo->mem.pfns);
	abo->mem.pfns = NULL;
}

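/*
 * Register an MMU interval notifier covering [addr, addr + len) and
 * allocate the pfn array that goes with it. This is a no-op for devices
 * that do not provide an hmm_invalidate callback.
 */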
static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
				size_t len)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	u32 nr_pages;
	int ret;

	if (!xdna->dev_info->ops->hmm_invalidate)
		return 0;

	if (abo->mem.pfns)
		return -EEXIST;

	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
	abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
				 GFP_KERNEL);
	if (!abo->mem.pfns)
		return -ENOMEM;

	ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
						  current->mm,
						  addr,
						  len,
						  &amdxdna_hmm_ops);
	if (ret) {
		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
		kvfree(abo->mem.pfns);
		abo->mem.pfns = NULL;
		return ret;
	}
	abo->mem.userptr = addr;

	return 0;
}

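/*
 * mmap callback: register the HMM notifier for the new VMA, map the BO
 * through the shmem helper, then try to insert all backing pages up
 * front. A vm_insert_pages() failure is not fatal; remaining pages are
 * populated later via the fault handler.
 */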
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
				struct vm_area_struct *vma)
{
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	unsigned long num_pages;
	int ret;

	ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
	if (ret)
		return ret;

	ret = drm_gem_shmem_mmap(&abo->base, vma);
	if (ret)
		goto hmm_unreg;

	num_pages = gobj->size >> PAGE_SHIFT;
	/* Try to insert the pages */
	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
	if (ret)
		XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);

	return 0;

hmm_unreg:
	amdxdna_hmm_unregister(abo);
	return ret;
}

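/*
 * Thin wrappers around drm_gem_shmem_vm_ops so that vm_close can also
 * tear down the HMM notifier registered at mmap time.
 */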
static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
{
	return drm_gem_shmem_vm_ops.fault(vmf);
}

static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
{
	drm_gem_shmem_vm_ops.open(vma);
}

static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *gobj = vma->vm_private_data;

	amdxdna_hmm_unregister(to_xdna_obj(gobj));
	drm_gem_shmem_vm_ops.close(vma);
}

static const struct vm_operations_struct amdxdna_gem_vm_ops = {
	.fault = amdxdna_gem_vm_fault,
	.open = amdxdna_gem_vm_open,
	.close = amdxdna_gem_vm_close,
};

static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
	.free = amdxdna_gem_obj_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = amdxdna_gem_obj_mmap,
	.vm_ops = &amdxdna_gem_vm_ops,
};

static struct amdxdna_gem_obj *
amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = kzalloc(sizeof(*abo), GFP_KERNEL);
	if (!abo)
		return ERR_PTR(-ENOMEM);

	abo->pinned = false;
	abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
	mutex_init(&abo->lock);

	abo->mem.userptr = AMDXDNA_INVALID_ADDR;
	abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
	abo->mem.size = size;

	return abo;
}

/* For drm_driver->gem_create_object callback */
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_obj(dev, size);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;

	return to_gobj(abo);
}

static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
			struct amdxdna_drm_create_bo *args,
			struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;

	abo = to_xdna_obj(&shmem->base);
	abo->client = client;
	abo->type = AMDXDNA_BO_SHMEM;

	return abo;
}

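/*
 * Create the per-client device heap: a single shmem BO backing the NPU's
 * device memory window. Device BOs are later sub-allocated from it via
 * the drm_mm initialized here. Only one heap is allowed per client.
 */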
static struct amdxdna_gem_obj *
amdxdna_drm_create_dev_heap(struct drm_device *dev,
			    struct amdxdna_drm_create_bo *args,
			    struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > xdna->dev_info->dev_mem_size) {
		XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
			 args->size, xdna->dev_info->dev_mem_size);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&client->mm_lock);
	if (client->dev_heap) {
		XDNA_DBG(client->xdna, "dev heap is already created");
		ret = -EBUSY;
		goto mm_unlock;
	}

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem)) {
		ret = PTR_ERR(shmem);
		goto mm_unlock;
	}

	shmem->map_wc = false;
	abo = to_xdna_obj(&shmem->base);

	abo->type = AMDXDNA_BO_DEV_HEAP;
	abo->client = client;
	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);

	client->dev_heap = abo;
	drm_gem_object_get(to_gobj(abo));
	mutex_unlock(&client->mm_lock);

	return abo;

mm_unlock:
	mutex_unlock(&client->mm_lock);
	return ERR_PTR(ret);
}

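/*
 * Allocate a device BO by carving a range out of the client's device
 * heap. The result is a private GEM object that shares the heap's backing
 * pages and holds a reference on the heap object.
 */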
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
			 struct amdxdna_drm_create_bo *args,
			 struct drm_file *filp, bool use_vmap)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	size_t aligned_sz = PAGE_ALIGN(args->size);
	struct amdxdna_gem_obj *abo, *heap;
	int ret;

	mutex_lock(&client->mm_lock);
	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto mm_unlock;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto mm_unlock;
	}

	if (args->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
			 args->size, heap->mem.size);
		ret = -EINVAL;
		goto mm_unlock;
	}

	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
	if (IS_ERR(abo)) {
		ret = PTR_ERR(abo);
		goto mm_unlock;
	}
	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
	abo->type = AMDXDNA_BO_DEV;
	abo->client = client;
	abo->dev_heap = heap;
	ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto free_abo;
	}

	drm_gem_object_get(to_gobj(heap));
	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

	mutex_unlock(&client->mm_lock);
	return abo;

free_abo:
	mutex_destroy(&abo->lock);
	kfree(abo);
mm_unlock:
	mutex_unlock(&client->mm_lock);
	return ERR_PTR(ret);
}

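/*
 * Create a command BO: a small shmem BO, bounded by XDNA_MAX_CMD_BO_SIZE
 * and at least sizeof(struct amdxdna_cmd), that stays vmapped so the
 * driver can access the command payload through abo->mem.kva.
 */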
static struct amdxdna_gem_obj *
amdxdna_drm_create_cmd_bo(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args,
			  struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct drm_gem_shmem_object *shmem;
	struct amdxdna_gem_obj *abo;
	struct iosys_map map;
	int ret;

	if (args->size > XDNA_MAX_CMD_BO_SIZE) {
		XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
		return ERR_PTR(-EINVAL);
	}

	if (args->size < sizeof(struct amdxdna_cmd)) {
		XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
		return ERR_PTR(-EINVAL);
	}

	shmem = drm_gem_shmem_create(dev, args->size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;
	abo = to_xdna_obj(&shmem->base);

	abo->type = AMDXDNA_BO_CMD;
	abo->client = filp->driver_priv;

	ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
	if (ret) {
		XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
		goto release_obj;
	}
	abo->mem.kva = map.vaddr;

	return abo;

release_obj:
	drm_gem_shmem_free(shmem);
	return ERR_PTR(ret);
}

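/*
 * BO create ioctl: reject unsupported flags, dispatch to the allocator
 * matching args->type and publish the object to userspace. Once the
 * handle is created it owns the reference, so the local reference is
 * always dropped before returning.
 */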
int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_create_bo *args = data;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->flags || args->vaddr || !args->size)
		return -EINVAL;

	XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
		 args->type, args->vaddr, args->size, args->flags);
	switch (args->type) {
	case AMDXDNA_BO_SHMEM:
		abo = amdxdna_drm_alloc_shmem(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV_HEAP:
		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV:
		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
		break;
	case AMDXDNA_BO_CMD:
		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
		break;
	default:
		return -EINVAL;
	}
	if (IS_ERR(abo))
		return PTR_ERR(abo);

	/* ready to publish object to userspace */
	ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
	if (ret) {
		XDNA_ERR(xdna, "Create handle failed");
		goto put_obj;
	}

	XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
		 args->handle, args->type, abo->mem.userptr,
		 abo->mem.dev_addr, abo->mem.size);
put_obj:
	/* Dereference object reference. Handle holds it now. */
	drm_gem_object_put(to_gobj(abo));
	return ret;
}

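/*
 * Pin the pages backing a BO. AMDXDNA_BO_DEV objects are backed by their
 * parent heap, so the heap is pinned instead. amdxdna_gem_pin() wraps the
 * _nolock variant with abo->lock.
 */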
int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	int ret;

	switch (abo->type) {
	case AMDXDNA_BO_SHMEM:
	case AMDXDNA_BO_DEV_HEAP:
		ret = drm_gem_shmem_pin(&abo->base);
		break;
	case AMDXDNA_BO_DEV:
		ret = drm_gem_shmem_pin(&abo->dev_heap->base);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
	return ret;
}

int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
	int ret;

	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->dev_heap;

	mutex_lock(&abo->lock);
	ret = amdxdna_gem_pin_nolock(abo);
	mutex_unlock(&abo->lock);

	return ret;
}

void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->dev_heap;

	mutex_lock(&abo->lock);
	drm_gem_shmem_unpin(&abo->base);
	mutex_unlock(&abo->lock);
}

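/*
 * Look up a BO by handle and, unless bo_type is AMDXDNA_BO_INVALID, check
 * that it has the expected type. Returns the object with a reference
 * held, or NULL; the caller must drop the reference with
 * drm_gem_object_put().
 */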
struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
					    u32 bo_hdl, u8 bo_type)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(client->filp, bo_hdl);
	if (!gobj) {
		XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
		return NULL;
	}

	abo = to_xdna_obj(gobj);
	if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
		return abo;

	drm_gem_object_put(gobj);
	return NULL;
}

int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_drm_get_bo_info *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret = 0;

	if (args->ext || args->ext_flags || args->pad)
		return -EINVAL;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
		return -ENOENT;
	}

	abo = to_xdna_obj(gobj);
	args->vaddr = abo->mem.userptr;
	args->xdna_addr = abo->mem.dev_addr;

	if (abo->type != AMDXDNA_BO_DEV)
		args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
	else
		args->map_offset = AMDXDNA_INVALID_ADDR;

	XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
		 args->handle, args->map_offset, args->vaddr, args->xdna_addr);

	drm_gem_object_put(gobj);
	return ret;
}

/*
 * The sync BO ioctl makes sure the CPU cache is in sync with memory. This
 * is required because the NPU is not a cache-coherent device. CPU cache
 * flushing/invalidation is expensive, so it is best handled outside of
 * the command submission path. This ioctl allows explicit cache
 * flushing/invalidation outside of that critical path.
 */
int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_sync_bo *args = data;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_ERR(xdna, "Lookup GEM object failed");
		return -ENOENT;
	}
	abo = to_xdna_obj(gobj);

	ret = amdxdna_gem_pin(abo);
	if (ret) {
		XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
		goto put_obj;
	}

	if (abo->type == AMDXDNA_BO_DEV)
		drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
	else
		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);

	amdxdna_gem_unpin(abo);

	XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
		 args->handle, args->offset, args->size);

put_obj:
	drm_gem_object_put(gobj);
	return ret;
}
623