// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
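
/*
 * Eviction fences are per-process fences attached with BOOKKEEP usage to
 * the reservation objects of BOs used by user mode queues. When eviction
 * of such a BO is needed, signaling is enabled on the fence, which
 * schedules the suspend worker to evict the process's user queues; once
 * the fence is signaled it is swapped for a fresh one via
 * amdgpu_eviction_fence_replace_fence().
 */
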
#include <linux/sched.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"

#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)

static const char *
amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu_eviction_fence";
}

static const char *
amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_eviction_fence *ef;

	ef = container_of(f, struct amdgpu_eviction_fence, base);
	return ef->timeline_name;
}

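/**
 * amdgpu_eviction_fence_replace_fence - replace the process's eviction fence
 * @evf_mgr: eviction fence manager attached to the drm file private
 * @exec: drm_exec context with all affected BOs already locked by the caller
 *
 * Create a fresh eviction fence, publish it in @evf_mgr, attach it to every
 * BO locked in @exec, then drop the reference on the fence it replaces.
 * A no-op while the current fence is still unsignaled.
 *
 * Returns: 0 on success or a negative error code on failure.
 */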
46 int
amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr * evf_mgr,struct drm_exec * exec)47 amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
48 				    struct drm_exec *exec)
49 {
50 	struct amdgpu_eviction_fence *old_ef, *new_ef;
51 	struct drm_gem_object *obj;
52 	unsigned long index;
53 	int ret;
54 
55 	if (evf_mgr->ev_fence &&
56 	    !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
57 		return 0;
58 	/*
59 	 * Steps to replace eviction fence:
60 	 * * lock all objects in exec (caller)
61 	 * * create a new eviction fence
62 	 * * update new eviction fence in evf_mgr
63 	 * * attach the new eviction fence to BOs
64 	 * * release the old fence
65 	 * * unlock the objects (caller)
66 	 */
67 	new_ef = amdgpu_eviction_fence_create(evf_mgr);
68 	if (!new_ef) {
69 		DRM_ERROR("Failed to create new eviction fence\n");
70 		return -ENOMEM;
71 	}
72 
73 	/* Update the eviction fence now */
74 	spin_lock(&evf_mgr->ev_fence_lock);
75 	old_ef = evf_mgr->ev_fence;
76 	evf_mgr->ev_fence = new_ef;
77 	spin_unlock(&evf_mgr->ev_fence_lock);
78 
79 	/* Attach the new fence */
80 	drm_exec_for_each_locked_object(exec, index, obj) {
81 		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
82 
83 		if (!bo)
84 			continue;
85 		ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
86 		if (ret) {
			DRM_ERROR("Failed to attach new eviction fence\n");
			goto free_err;
		}
	}

	/* Free old fence */
	if (old_ef)
		dma_fence_put(&old_ef->base);
	return 0;

free_err:
	/*
	 * The fence was initialized with dma_fence_init64() and may already
	 * be attached to some BOs, so drop the creation reference with
	 * dma_fence_put() rather than freeing the object directly.
	 */
	dma_fence_put(&new_ef->base);
	return ret;
}

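/**
 * amdgpu_eviction_fence_suspend_worker - deferred user queue suspension
 * @work: work struct of the manager's delayed suspend_work
 *
 * Runs after signaling has been enabled on the eviction fence: takes a
 * reference on the current fence and asks the user queue manager to
 * evict this process's user mode queues.
 */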
static void
amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
{
	struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
	struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_eviction_fence *ev_fence;

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	if (ev_fence)
		dma_fence_get(&ev_fence->base);
	else
		goto unlock;
	spin_unlock(&evf_mgr->ev_fence_lock);

	amdgpu_userq_evict(uq_mgr, ev_fence);

	mutex_unlock(&uq_mgr->userq_mutex);
	dma_fence_put(&ev_fence->base);
	return;

unlock:
	spin_unlock(&evf_mgr->ev_fence_lock);
	mutex_unlock(&uq_mgr->userq_mutex);
}

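/*
 * dma_fence_ops.enable_signaling callback, called with the fence lock
 * held. Eviction cannot run in this atomic context, so defer it to the
 * suspend worker; the fence is signaled later through
 * amdgpu_eviction_fence_signal().
 */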
static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_eviction_fence_mgr *evf_mgr;
	struct amdgpu_eviction_fence *ev_fence;

	if (!f)
		return true;

	ev_fence = to_ev_fence(f);
	evf_mgr = ev_fence->evf_mgr;

	schedule_delayed_work(&evf_mgr->suspend_work, 0);
	return true;
}

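/*
 * No .release callback here: the final dma_fence_put() falls through to
 * the default dma_fence_free(), which frees the fence with kfree_rcu().
 */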
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
	.get_driver_name = amdgpu_eviction_fence_get_driver_name,
	.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
	.enable_signaling = amdgpu_eviction_fence_enable_signaling,
};

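/**
 * amdgpu_eviction_fence_signal - signal an eviction fence
 * @evf_mgr: eviction fence manager the fence belongs to
 * @ev_fence: eviction fence to signal
 *
 * Signals @ev_fence under ev_fence_lock so signaling cannot race with
 * the fence being replaced.
 */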
void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
				  struct amdgpu_eviction_fence *ev_fence)
{
	spin_lock(&evf_mgr->ev_fence_lock);
	dma_fence_signal(&ev_fence->base);
	spin_unlock(&evf_mgr->ev_fence_lock);
}

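/**
 * amdgpu_eviction_fence_create - allocate and initialize an eviction fence
 * @evf_mgr: eviction fence manager providing the fence context and seqno
 *
 * The timeline name is taken from the current task's comm, so the fence
 * shows up under the process name in debug output.
 *
 * Returns: the new fence with one reference held, or NULL on allocation
 * failure.
 */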
struct amdgpu_eviction_fence *
amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

	ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
	if (!ev_fence)
		return NULL;

	ev_fence->evf_mgr = evf_mgr;
	get_task_comm(ev_fence->timeline_name, current);
	spin_lock_init(&ev_fence->lock);
	dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
			 &ev_fence->lock, evf_mgr->ev_fence_ctx,
			 atomic_inc_return(&evf_mgr->ev_fence_seq));
	return ev_fence;
}

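/**
 * amdgpu_eviction_fence_destroy - tear down the eviction fence manager
 * @evf_mgr: eviction fence manager being destroyed
 *
 * Flushes pending suspend work, waits for the current fence to signal
 * and then drops the manager's reference on it.
 */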
void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

	/* Wait for any pending work to execute */
	flush_delayed_work(&evf_mgr->suspend_work);

	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);

	if (!ev_fence)
		return;

	dma_fence_wait(&ev_fence->base, false);

	/* Last unref of ev_fence */
	dma_fence_put(&ev_fence->base);
}

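/**
 * amdgpu_eviction_fence_attach - attach the current eviction fence to a BO
 * @evf_mgr: eviction fence manager holding the current fence
 * @bo: buffer object to attach the fence to
 *
 * Reserves a fence slot in the BO's reservation object and adds the
 * current eviction fence with DMA_RESV_USAGE_BOOKKEEP usage. The BO's
 * reservation lock must be held by the caller.
 *
 * Returns: 0 on success or a negative error code on failure.
 */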
int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_eviction_fence *ev_fence;
	struct dma_resv *resv = bo->tbo.base.resv;
	int ret;

	if (!resv)
		return 0;

	ret = dma_resv_reserve_fences(resv, 1);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to reserve fence slot\n");
		return ret;
	}

	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	if (ev_fence)
		dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
	spin_unlock(&evf_mgr->ev_fence_lock);

	return 0;
}

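/**
 * amdgpu_eviction_fence_detach - remove eviction fences from a BO
 * @evf_mgr: eviction fence manager whose fence context is detached
 * @bo: buffer object to detach from
 *
 * Replaces every fence from this manager's context in the BO's
 * reservation object with a signaled stub fence.
 */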
void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
				  struct amdgpu_bo *bo)
{
	struct dma_fence *stub = dma_fence_get_stub();

	dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
				stub, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(stub);
}

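/**
 * amdgpu_eviction_fence_init - set up the per-open eviction fence manager
 * @evf_mgr: eviction fence manager to initialize
 *
 * Allocates a dedicated fence context, resets the sequence counter and
 * prepares the suspend worker. Called once per drm file open.
 *
 * Returns: always 0.
 */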
int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	/* This needs to be done one time per open */
	atomic_set(&evf_mgr->ev_fence_seq, 0);
	evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
	spin_lock_init(&evf_mgr->ev_fence_lock);

	INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
	return 0;
}