xref: /linux/drivers/accel/amdxdna/amdxdna_gem.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h"

MODULE_IMPORT_NS("DMA_BUF");

static int
amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_client *client = abo->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_mem *mem = &abo->mem;
	struct amdxdna_gem_obj *heap;
	u64 offset;
	u32 align;
	int ret;

	mutex_lock(&client->mm_lock);

	heap = client->dev_heap;
	if (!heap) {
		ret = -EINVAL;
		goto unlock_out;
	}

	if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
		XDNA_ERR(xdna, "Invalid dev heap userptr");
		ret = -EINVAL;
		goto unlock_out;
	}

	if (mem->size == 0 || mem->size > heap->mem.size) {
		XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
			 mem->size, heap->mem.size);
		ret = -EINVAL;
		goto unlock_out;
	}

	align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
	ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
					 mem->size, align,
					 0, DRM_MM_INSERT_BEST);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		goto unlock_out;
	}

	mem->dev_addr = abo->mm_node.start;
	offset = mem->dev_addr - heap->mem.dev_addr;
	mem->userptr = heap->mem.userptr + offset;
	mem->kva = heap->mem.kva + offset;

	drm_gem_object_get(to_gobj(heap));

unlock_out:
	mutex_unlock(&client->mm_lock);

	return ret;
}
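
/*
 * Editor's note: dev BOs are carved out of the client's heap BO with the
 * drm_mm range allocator used above, so the device address, userptr and
 * kernel VA of a dev BO are all the same offset into the heap's three
 * mappings. A minimal sketch of that allocator pattern (illustrative
 * names, not part of this driver):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node;
 *
 *	drm_mm_init(&mm, heap_dev_addr, heap_size);
 *	if (!drm_mm_insert_node_generic(&mm, &node, size, align, 0,
 *					DRM_MM_INSERT_BEST)) {
 *		u64 off = node.start - heap_dev_addr;	// one offset ...
 *		void *kva = heap_kva + off;		// ... three views
 *	}
 *	drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);
 */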

static void
amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
{
	mutex_destroy(&abo->lock);
	kfree(abo);
}

static void
amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_gem_obj *heap;

	mutex_lock(&abo->client->mm_lock);

	drm_mm_remove_node(&abo->mm_node);

	heap = abo->client->dev_heap;
	drm_gem_object_put(to_gobj(heap));

	mutex_unlock(&abo->client->mm_lock);
}

static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
	struct amdxdna_gem_obj *abo = mapp->abo;
	struct amdxdna_dev *xdna;

	xdna = to_xdna_dev(to_gobj(abo)->dev);
	XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
		 mapp->vma->vm_start, mapp->vma->vm_end, abo->type);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&xdna->notifier_lock);
	abo->mem.map_invalid = true;
	mapp->invalid = true;
	mmu_interval_set_seq(&mapp->notifier, cur_seq);
	up_write(&xdna->notifier_lock);

	xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);

	if (range->event == MMU_NOTIFY_UNMAP) {
		down_write(&xdna->notifier_lock);
		if (!mapp->unmapped) {
			queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
			mapp->unmapped = true;
		}
		up_write(&xdna->notifier_lock);
	}

	return true;
}

static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
	.invalidate = amdxdna_hmm_invalidate,
};

static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
				   struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct amdxdna_umap *mapp;

	down_read(&xdna->notifier_lock);
	list_for_each_entry(mapp, &abo->mem.umap_list, node) {
		if (!vma || mapp->vma == vma) {
			if (!mapp->unmapped) {
				queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
				mapp->unmapped = true;
			}
			if (vma)
				break;
		}
	}
	up_read(&xdna->notifier_lock);
}

static void amdxdna_umap_release(struct kref *ref)
{
	struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
	struct vm_area_struct *vma = mapp->vma;
	struct amdxdna_dev *xdna;

	mmu_interval_notifier_remove(&mapp->notifier);
	if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_clear_unevictable(vma->vm_file->f_mapping);

	xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
	down_write(&xdna->notifier_lock);
	list_del(&mapp->node);
	up_write(&xdna->notifier_lock);

	kvfree(mapp->range.hmm_pfns);
	kfree(mapp);
}

void amdxdna_umap_put(struct amdxdna_umap *mapp)
{
	kref_put(&mapp->refcnt, amdxdna_umap_release);
}

static void amdxdna_hmm_unreg_work(struct work_struct *work)
{
	struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
						 hmm_unreg_work);

	amdxdna_umap_put(mapp);
}
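
/*
 * Editor's note: mmu_interval_notifier_remove() sleeps until all running
 * invalidate callbacks have finished, so it must never be called from
 * amdxdna_hmm_invalidate() itself. That is why teardown is bounced to
 * notifier_wq above and the final put runs in process context. Hedged
 * sketch of the shape of that pattern:
 *
 *	// .invalidate callback: defer, never remove the notifier here
 *	queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
 *
 *	// worker (process context): removal may now sleep safely
 *	amdxdna_umap_put(mapp);	// -> mmu_interval_notifier_remove()
 */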

static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long addr = vma->vm_start;
	struct amdxdna_umap *mapp;
	u32 nr_pages;
	int ret;

	if (!xdna->dev_info->ops->hmm_invalidate)
		return 0;

	mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
	if (!mapp)
		return -ENOMEM;

	nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
	mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
					GFP_KERNEL);
	if (!mapp->range.hmm_pfns) {
		ret = -ENOMEM;
		goto free_map;
	}

	ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
						  current->mm,
						  addr,
						  len,
						  &amdxdna_hmm_ops);
	if (ret) {
		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
		goto free_pfns;
	}

	mapp->range.notifier = &mapp->notifier;
	mapp->range.start = vma->vm_start;
	mapp->range.end = vma->vm_end;
	mapp->range.default_flags = HMM_PFN_REQ_FAULT;
	mapp->vma = vma;
	mapp->abo = abo;
	kref_init(&mapp->refcnt);

	if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
		abo->mem.userptr = addr;
	INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
	if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
		mapping_set_unevictable(vma->vm_file->f_mapping);

	down_write(&xdna->notifier_lock);
	list_add_tail(&mapp->node, &abo->mem.umap_list);
	up_write(&xdna->notifier_lock);

	return 0;

free_pfns:
	kvfree(mapp->range.hmm_pfns);
free_map:
	kfree(mapp);
	return ret;
}
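
/*
 * Editor's note: the struct hmm_range populated above is meant to be
 * consumed with the standard hmm_range_fault() retry loop documented in
 * Documentation/mm/hmm.rst. Hedged sketch of that pattern, assuming
 * xdna->notifier_lock is the driver lock taken against
 * amdxdna_hmm_invalidate():
 *
 *	int ret;
 *
 *	do {
 *		range->notifier_seq = mmu_interval_read_begin(range->notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				continue;	// collided, start over
 *			return ret;
 *		}
 *		down_read(&xdna->notifier_lock);
 *		if (mmu_interval_read_retry(range->notifier,
 *					    range->notifier_seq)) {
 *			up_read(&xdna->notifier_lock);
 *			continue;		// invalidated meanwhile
 *		}
 *		break;
 *	} while (true);
 *
 *	// range->hmm_pfns[] is stable until notifier_lock is dropped
 *	up_read(&xdna->notifier_lock);
 */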

static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	amdxdna_gem_heap_free(abo);
	drm_gem_object_release(gobj);
	amdxdna_gem_destroy_obj(abo);
}

static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	unsigned long num_pages = vma_pages(vma);
	unsigned long offset = 0;
	int ret;

	if (!is_import_bo(abo)) {
		ret = drm_gem_shmem_mmap(&abo->base, vma);
		if (ret) {
			XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
			return ret;
		}

		/* The buffer is based on memory pages. Fix the flag. */
		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
		ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
				      &num_pages);
		if (ret) {
			XDNA_ERR(xdna, "Failed insert pages %d", ret);
			vma->vm_ops->close(vma);
			return ret;
		}

		return 0;
	}

	vma->vm_private_data = NULL;
	vma->vm_ops = NULL;
	ret = dma_buf_mmap(abo->dma_buf, vma, 0);
	if (ret) {
		XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
		return ret;
	}

	do {
		vm_fault_t fault_ret;

		fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
					    FAULT_FLAG_WRITE, NULL);
		if (fault_ret & VM_FAULT_ERROR) {
			vma->vm_ops->close(vma);
			XDNA_ERR(xdna, "Fault in page failed");
			return -EFAULT;
		}

		offset += PAGE_SIZE;
	} while (--num_pages);

	/* Drop the reference drm_gem_mmap_obj() acquired. */
	drm_gem_object_put(to_gobj(abo));

	return 0;
}

static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
				struct vm_area_struct *vma)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	int ret;

	ret = amdxdna_hmm_register(abo, vma);
	if (ret)
		return ret;

	ret = amdxdna_insert_pages(abo, vma);
	if (ret) {
		XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
		goto hmm_unreg;
	}

	XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
		 drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
		 vma->vm_start, gobj->size);
	return 0;

hmm_unreg:
	amdxdna_hmm_unregister(abo, vma);
	return ret;
}

static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gobj = dma_buf->priv;
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	unsigned long num_pages = vma_pages(vma);
	int ret;

	vma->vm_ops = &drm_gem_shmem_vm_ops;
	vma->vm_private_data = gobj;

	drm_gem_object_get(gobj);
	ret = drm_gem_shmem_mmap(&abo->base, vma);
	if (ret)
		goto put_obj;

	/* The buffer is based on memory pages. Fix the flag. */
	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
	ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
			      &num_pages);
	if (ret)
		goto close_vma;

	return 0;

close_vma:
	vma->vm_ops->close(vma);
put_obj:
	drm_gem_object_put(gobj);
	return ret;
}

static const struct dma_buf_ops amdxdna_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = amdxdna_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
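
/*
 * Editor's note: these dma_buf_ops back the fd userspace obtains through
 * the generic DRM PRIME export path. Hedged userspace-side sketch; the
 * drm_fd and bo_handle names are assumptions, not part of this driver:
 *
 *	#include <xf86drm.h>
 *
 *	int dmabuf_fd = -1;
 *	// bo_handle from DRM_IOCTL_AMDXDNA_CREATE_BO
 *	if (drmPrimeHandleToFD(drm_fd, bo_handle, DRM_CLOEXEC | DRM_RDWR,
 *			       &dmabuf_fd) == 0) {
 *		// dmabuf_fd can now be mmap()ed (routed to
 *		// amdxdna_gem_dmabuf_mmap()) or imported elsewhere
 *	}
 */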

static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
	int ret;

	if (is_import_bo(abo))
		ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
	else
		ret = drm_gem_vmap(to_gobj(abo), &map);

	*vaddr = map.vaddr;
	return ret;
}

static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
{
	struct iosys_map map;

	if (!abo->mem.kva)
		return;

	iosys_map_set_vaddr(&map, abo->mem.kva);

	if (is_import_bo(abo))
		dma_buf_vunmap_unlocked(abo->dma_buf, &map);
	else
		drm_gem_vunmap(to_gobj(abo), &map);
}

static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (abo->dma_buf) {
		get_dma_buf(abo->dma_buf);
		return abo->dma_buf;
	}

	exp_info.ops = &amdxdna_dmabuf_ops;
	exp_info.size = gobj->size;
	exp_info.flags = flags;
	exp_info.priv = gobj;
	exp_info.resv = gobj->resv;

	return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}

static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
	dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(abo->dma_buf, abo->attach);
	dma_buf_put(abo->dma_buf);
	drm_gem_object_release(to_gobj(abo));
	kfree(abo);
}

static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
	struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
	struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

	XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);

	amdxdna_hmm_unregister(abo, NULL);
	flush_workqueue(xdna->notifier_wq);

	if (abo->pinned)
		amdxdna_gem_unpin(abo);

	if (abo->type == AMDXDNA_BO_DEV_HEAP)
		drm_mm_takedown(&abo->mm);

	amdxdna_gem_obj_vunmap(abo);
	mutex_destroy(&abo->lock);

	if (is_import_bo(abo)) {
		amdxdna_imported_obj_free(abo);
		return;
	}

	drm_gem_shmem_free(&abo->base);
}

static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
	.free = amdxdna_gem_dev_obj_free,
};

static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
	.free = amdxdna_gem_obj_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = amdxdna_gem_obj_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
	.export = amdxdna_gem_prime_export,
};

static struct amdxdna_gem_obj *
amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = kzalloc(sizeof(*abo), GFP_KERNEL);
	if (!abo)
		return ERR_PTR(-ENOMEM);

	abo->pinned = false;
	abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
	mutex_init(&abo->lock);

	abo->mem.userptr = AMDXDNA_INVALID_ADDR;
	abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
	abo->mem.size = size;
	INIT_LIST_HEAD(&abo->mem.umap_list);

	return abo;
}

/* For drm_driver->gem_create_object callback */
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
{
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_obj(dev, size);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;

	return to_gobj(abo);
}
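
/*
 * Editor's note: the callback above only takes effect once it is wired
 * into the driver's struct drm_driver. Hedged sketch of that hookup
 * (the real definition likely lives in amdxdna_pci_drv.c):
 *
 *	static const struct drm_driver amdxdna_drm_drv = {
 *		...
 *		.gem_create_object = amdxdna_gem_create_object_cb,
 *		.gem_prime_import = amdxdna_gem_prime_import,
 *	};
 */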

static struct amdxdna_gem_obj *
amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);

	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->map_wc = false;
	return to_xdna_obj(&shmem->base);
}

static struct amdxdna_gem_obj *
amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	enum amdxdna_ubuf_flag flags = 0;
	struct amdxdna_drm_va_tbl va_tbl;
	struct drm_gem_object *gobj;
	struct dma_buf *dma_buf;

	if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
		XDNA_DBG(xdna, "Access va table failed");
		return ERR_PTR(-EINVAL);
	}

	if (va_tbl.num_entries) {
		if (args->type == AMDXDNA_BO_CMD)
			flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;

		dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
					   u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
	} else {
		dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
	}

	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gobj = amdxdna_gem_prime_import(dev, dma_buf);
	if (IS_ERR(gobj)) {
		dma_buf_put(dma_buf);
		return ERR_CAST(gobj);
	}

	dma_buf_put(dma_buf);

	return to_xdna_obj(gobj);
}

static struct amdxdna_gem_obj *
amdxdna_gem_create_object(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args)
{
	size_t aligned_sz = PAGE_ALIGN(args->size);

	if (args->vaddr)
		return amdxdna_gem_create_ubuf_object(dev, args);

	return amdxdna_gem_create_shmem_object(dev, aligned_sz);
}

struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	struct sg_table *sgt;
	int ret;

	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put_buf;
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(gobj)) {
		ret = PTR_ERR(gobj);
		goto fail_unmap;
	}

	abo = to_xdna_obj(gobj);
	abo->attach = attach;
	abo->dma_buf = dma_buf;

	return gobj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
put_buf:
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
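
/*
 * Editor's note: the mirror of the export sketch earlier; userspace
 * turns a dma-buf fd into a GEM handle and the DRM core ends up calling
 * amdxdna_gem_prime_import(). Hedged sketch, names are illustrative:
 *
 *	uint32_t handle;
 *
 *	if (drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle) == 0) {
 *		// handle now references the imported BO; closing
 *		// dmabuf_fd is safe because the import above took its
 *		// own reference with get_dma_buf()
 *	}
 */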

static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
			struct amdxdna_drm_create_bo *args,
			struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_gem_obj *abo;

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	abo->client = client;
	abo->type = AMDXDNA_BO_SHMEM;

	return abo;
}

static struct amdxdna_gem_obj *
amdxdna_drm_create_dev_heap(struct drm_device *dev,
			    struct amdxdna_drm_create_bo *args,
			    struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->size > xdna->dev_info->dev_mem_size) {
		XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
			 args->size, xdna->dev_info->dev_mem_size);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&client->mm_lock);
	if (client->dev_heap) {
		XDNA_DBG(client->xdna, "dev heap is already created");
		ret = -EBUSY;
		goto mm_unlock;
	}

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo)) {
		ret = PTR_ERR(abo);
		goto mm_unlock;
	}

	abo->type = AMDXDNA_BO_DEV_HEAP;
	abo->client = client;
	abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
	drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);

	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
	if (ret) {
		XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
		goto release_obj;
	}

	client->dev_heap = abo;
	drm_gem_object_get(to_gobj(abo));
	mutex_unlock(&client->mm_lock);

	return abo;

release_obj:
	drm_gem_object_put(to_gobj(abo));
mm_unlock:
	mutex_unlock(&client->mm_lock);
	return ERR_PTR(ret);
}

struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
			 struct amdxdna_drm_create_bo *args,
			 struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	size_t aligned_sz = PAGE_ALIGN(args->size);
	struct amdxdna_gem_obj *abo;
	int ret;

	abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
	if (IS_ERR(abo))
		return abo;

	to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
	abo->type = AMDXDNA_BO_DEV;
	abo->client = client;

	ret = amdxdna_gem_heap_alloc(abo);
	if (ret) {
		XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
		amdxdna_gem_destroy_obj(abo);
		return ERR_PTR(ret);
	}

	drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

	return abo;
}

static struct amdxdna_gem_obj *
amdxdna_drm_create_cmd_bo(struct drm_device *dev,
			  struct amdxdna_drm_create_bo *args,
			  struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;

	if (args->size < sizeof(struct amdxdna_cmd)) {
		XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
		return ERR_PTR(-EINVAL);
	}

	abo = amdxdna_gem_create_object(dev, args);
	if (IS_ERR(abo))
		return ERR_CAST(abo);

	abo->type = AMDXDNA_BO_CMD;
	abo->client = filp->driver_priv;

	return abo;
}

int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_create_bo *args = data;
	struct amdxdna_gem_obj *abo;
	int ret;

	if (args->flags)
		return -EINVAL;

	XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
		 args->type, args->vaddr, args->size, args->flags);
	switch (args->type) {
	case AMDXDNA_BO_SHMEM:
		abo = amdxdna_drm_alloc_shmem(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV_HEAP:
		abo = amdxdna_drm_create_dev_heap(dev, args, filp);
		break;
	case AMDXDNA_BO_DEV:
		abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
		break;
	case AMDXDNA_BO_CMD:
		abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
		break;
	default:
		return -EINVAL;
	}
	if (IS_ERR(abo))
		return PTR_ERR(abo);

	/* ready to publish object to userspace */
	ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
	if (ret) {
		XDNA_ERR(xdna, "Create handle failed");
		goto put_obj;
	}

	XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
		 args->handle, args->type, abo->mem.userptr,
		 abo->mem.dev_addr, abo->mem.size);
put_obj:
	/* Dereference object reference. Handle holds it now. */
	drm_gem_object_put(to_gobj(abo));
	return ret;
}
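
/*
 * Editor's note: hedged userspace sketch of the expected creation order;
 * a dev heap must exist before any AMDXDNA_BO_DEV object can be carved
 * from it (amdxdna_gem_heap_alloc() returns -EINVAL otherwise). The
 * drm_fd and size values are assumptions for illustration:
 *
 *	struct amdxdna_drm_create_bo heap = {
 *		.type = AMDXDNA_BO_DEV_HEAP,
 *		.size = heap_size,		// device dependent
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_AMDXDNA_CREATE_BO, &heap);
 *
 *	struct amdxdna_drm_create_bo dev_bo = {
 *		.type = AMDXDNA_BO_DEV,
 *		.size = 0x1000,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_AMDXDNA_CREATE_BO, &dev_bo);
 */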

int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	int ret;

	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return 0;

	ret = drm_gem_shmem_pin(&abo->base);

	XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
	return ret;
}

int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
	int ret;

	mutex_lock(&abo->lock);
	ret = amdxdna_gem_pin_nolock(abo);
	mutex_unlock(&abo->lock);

	return ret;
}

void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
	if (abo->type == AMDXDNA_BO_DEV)
		abo = abo->client->dev_heap;

	if (is_import_bo(abo))
		return;

	mutex_lock(&abo->lock);
	drm_gem_shmem_unpin(&abo->base);
	mutex_unlock(&abo->lock);
}
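
/*
 * Editor's note: pin/unpin must stay balanced, and AMDXDNA_BO_DEV
 * objects redirect both calls to the backing heap BO, so it is the
 * heap's pin count that actually changes. Hedged kernel-side sketch of
 * the typical usage:
 *
 *	ret = amdxdna_gem_pin(abo);	// pages now stable for DMA
 *	if (ret)
 *		return ret;
 *	// ... program the device with abo->mem.dev_addr ...
 *	amdxdna_gem_unpin(abo);
 */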

struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
					    u32 bo_hdl, u8 bo_type)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	gobj = drm_gem_object_lookup(client->filp, bo_hdl);
	if (!gobj) {
		XDNA_DBG(xdna, "Can not find bo %d", bo_hdl);
		return NULL;
	}

	abo = to_xdna_obj(gobj);
	if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type)
		goto put_obj;

	if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva)
		return abo;

	if (abo->mem.size > SZ_32K) {
		XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size);
		goto put_obj;
	}

	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
	if (ret) {
		XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
		goto put_obj;
	}

	return abo;

put_obj:
	drm_gem_object_put(gobj);
	return NULL;
}

int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_drm_get_bo_info *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret = 0;

	if (args->ext || args->ext_flags || args->pad)
		return -EINVAL;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
		return -ENOENT;
	}

	abo = to_xdna_obj(gobj);
	args->vaddr = abo->mem.userptr;
	args->xdna_addr = abo->mem.dev_addr;

	if (abo->type != AMDXDNA_BO_DEV)
		args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
	else
		args->map_offset = AMDXDNA_INVALID_ADDR;

	XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
		 args->handle, args->map_offset, args->vaddr, args->xdna_addr);

	drm_gem_object_put(gobj);
	return ret;
}
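
/*
 * Editor's note: map_offset is a fake offset into the DRM file (dev BOs
 * report AMDXDNA_INVALID_ADDR and are reached through the heap mapping
 * instead), so the usual userspace pattern is a hedged sketch like:
 *
 *	struct amdxdna_drm_get_bo_info info = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_AMDXDNA_GET_BO_INFO, &info);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       drm_fd, info.map_offset);
 */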

/*
 * The sync bo ioctl keeps the CPU cache in sync with memory. This is
 * required because the NPU is not a cache-coherent device. CPU cache
 * flushing/invalidation is expensive, so it is best handled outside of
 * the command submission path. This ioctl allows explicit cache
 * flushing/invalidation outside of the critical path.
 */
int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *filp)
{
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_drm_sync_bo *args = data;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		XDNA_ERR(xdna, "Lookup GEM object failed");
		return -ENOENT;
	}
	abo = to_xdna_obj(gobj);

	ret = amdxdna_gem_pin(abo);
	if (ret) {
		XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
		goto put_obj;
	}

	if (is_import_bo(abo))
		drm_clflush_sg(abo->base.sgt);
	else if (abo->mem.kva)
		drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
	else if (abo->base.pages)
		drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
	else
		drm_WARN(&xdna->ddev, 1, "Can not get flush memory");

	amdxdna_gem_unpin(abo);

	XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
		 args->handle, args->offset, args->size);

	if (args->direction == SYNC_DIRECT_FROM_DEVICE)
		ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);

put_obj:
	drm_gem_object_put(gobj);
	return ret;
}
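
/*
 * Editor's note: hedged userspace sketch of the sync contract described
 * above; sync to the device before the NPU reads a buffer, and from the
 * device after it writes one. handle and buf_size are assumptions:
 *
 *	struct amdxdna_drm_sync_bo sync = {
 *		.handle = handle,
 *		.direction = SYNC_DIRECT_TO_DEVICE,
 *		.offset = 0,
 *		.size = buf_size,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_AMDXDNA_SYNC_BO, &sync);
 */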
972