xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/dma-fence.h>
25 #include <linux/workqueue.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_gmc.h"
30 
/*
 * Fence which is signaled from a worker once the TLB flush for a PASID
 * has been executed; created by amdgpu_vm_tlb_fence_create().
 */
struct amdgpu_tlb_fence {
	struct dma_fence	base;		/* embedded fence object */
	struct amdgpu_device	*adev;		/* device whose TLB is flushed */
	struct dma_fence	*dependency;	/* optional fence to wait on before flushing */
	struct work_struct	work;		/* worker executing the flush */
	spinlock_t		lock;		/* lock handed to dma_fence_init() */
	uint16_t		pasid;		/* PASID whose TLB entries to flush */

};
40 
/* dma_fence_ops callback: human readable name of the fence provider */
static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence)
{
	static const char * const driver_name = "amdgpu tlb fence";

	return driver_name;
}
45 
/* dma_fence_ops callback: human readable name of the fence timeline */
static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
{
	static const char * const timeline_name = "amdgpu tlb timeline";

	return timeline_name;
}
50 
/*
 * Worker which actually executes the TLB flush for a fence.
 *
 * Blocks on the optional dependency first, then flushes the TLB for the
 * fence's PASID, records any flush error on the fence and signals it.
 * Finally drops the reference taken when the work was scheduled, which
 * may free the fence — no member access is allowed after that.
 */
static void amdgpu_tlb_fence_work(struct work_struct *work)
{
	struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
	int r;

	/* Wait for the page table update to finish before flushing */
	if (f->dependency) {
		dma_fence_wait(f->dependency, false);
		dma_fence_put(f->dependency);
		f->dependency = NULL;
	}

	/*
	 * NOTE(review): args (2, true, 0) — presumably flush type, all hubs
	 * and instance; confirm against amdgpu_gmc_flush_gpu_tlb_pasid().
	 */
	r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
	if (r) {
		dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n",
			f->pasid);
		/* Make the failure visible to fence waiters */
		dma_fence_set_error(&f->base, r);
	}

	dma_fence_signal(&f->base);
	/* Drop the scheduling reference; f may be freed here */
	dma_fence_put(&f->base);
}
72 
/*
 * Only the name callbacks are provided; default dma_fence signaling is
 * used. Sequence numbers are 64bit wide (initialized from vm->tlb_seq).
 */
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = amdgpu_tlb_fence_get_driver_name,
	.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
78 
79 void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
80 				struct dma_fence **fence)
81 {
82 	struct amdgpu_tlb_fence *f;
83 
84 	f = kmalloc(sizeof(*f), GFP_KERNEL);
85 	if (!f) {
86 		/*
87 		 * We can't fail since the PDEs and PTEs are already updated, so
88 		 * just block for the dependency and execute the TLB flush
89 		 */
90 		if (*fence)
91 			dma_fence_wait(*fence, false);
92 
93 		amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
94 		*fence = dma_fence_get_stub();
95 		return;
96 	}
97 
98 	f->adev = adev;
99 	f->dependency = *fence;
100 	f->pasid = vm->pasid;
101 	INIT_WORK(&f->work, amdgpu_tlb_fence_work);
102 	spin_lock_init(&f->lock);
103 
104 	dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
105 		       vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
106 
107 	/* TODO: We probably need a separate wq here */
108 	dma_fence_get(&f->base);
109 	schedule_work(&f->work);
110 
111 	*fence = &f->base;
112 }
113