// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/sched.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"

#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)

static const char *
amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu_eviction_fence";
}

static const char *
amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_eviction_fence *ef;

        ef = container_of(f, struct amdgpu_eviction_fence, base);
        return ef->timeline_name;
}

int
amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
                                    struct drm_exec *exec)
{
        struct amdgpu_eviction_fence *old_ef, *new_ef;
        struct drm_gem_object *obj;
        unsigned long index;
        int ret;

        if (evf_mgr->ev_fence &&
            !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
                return 0;
        /*
         * Steps to replace eviction fence:
         * * lock all objects in exec (caller)
         * * create a new eviction fence
         * * update new eviction fence in evf_mgr
         * * attach the new eviction fence to BOs
         * * release the old fence
         * * unlock the objects (caller)
         */
        new_ef = amdgpu_eviction_fence_create(evf_mgr);
        if (!new_ef) {
                DRM_ERROR("Failed to create new eviction fence\n");
                return -ENOMEM;
        }

        /* Update the eviction fence now */
        spin_lock(&evf_mgr->ev_fence_lock);
        old_ef = evf_mgr->ev_fence;
        evf_mgr->ev_fence = new_ef;
        spin_unlock(&evf_mgr->ev_fence_lock);

        /* Attach the new fence */
        drm_exec_for_each_locked_object(exec, index, obj) {
                struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

                if (!bo)
                        continue;
                ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
                if (ret) {
                        DRM_ERROR("Failed to attach new eviction fence\n");
                        goto free_err;
                }
        }

        /* Free old fence */
        if (old_ef)
                dma_fence_put(&old_ef->base);
        return 0;

free_err:
        kfree(new_ef);
        return ret;
}

static void
amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
{
        struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
        struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
        struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
        struct amdgpu_eviction_fence *ev_fence;

        mutex_lock(&uq_mgr->userq_mutex);
        ev_fence = evf_mgr->ev_fence;
        if (!ev_fence)
                goto unlock;

        amdgpu_userq_evict(uq_mgr, ev_fence);

unlock:
        mutex_unlock(&uq_mgr->userq_mutex);
}

static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_eviction_fence_mgr *evf_mgr;
        struct amdgpu_eviction_fence *ev_fence;

        if (!f)
                return true;

        ev_fence = to_ev_fence(f);
        evf_mgr = ev_fence->evf_mgr;

        schedule_delayed_work(&evf_mgr->suspend_work, 0);
        return true;
}

static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
        .get_driver_name = amdgpu_eviction_fence_get_driver_name,
        .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
        .enable_signaling = amdgpu_eviction_fence_enable_signaling,
};

void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
                                  struct amdgpu_eviction_fence *ev_fence)
{
        spin_lock(&evf_mgr->ev_fence_lock);
        dma_fence_signal(&ev_fence->base);
        spin_unlock(&evf_mgr->ev_fence_lock);
}

struct amdgpu_eviction_fence *
amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
        struct amdgpu_eviction_fence *ev_fence;

        ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
        if (!ev_fence)
                return NULL;

        ev_fence->evf_mgr = evf_mgr;
        get_task_comm(ev_fence->timeline_name, current);
        spin_lock_init(&ev_fence->lock);
        dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
                         &ev_fence->lock, evf_mgr->ev_fence_ctx,
                         atomic_inc_return(&evf_mgr->ev_fence_seq));
        return ev_fence;
}

void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
        struct amdgpu_eviction_fence *ev_fence;

        /* Wait for any pending work to execute */
        flush_delayed_work(&evf_mgr->suspend_work);

        spin_lock(&evf_mgr->ev_fence_lock);
        ev_fence = evf_mgr->ev_fence;
        spin_unlock(&evf_mgr->ev_fence_lock);

        if (!ev_fence)
                return;

        dma_fence_wait(&ev_fence->base, false);

        /* Last unref of ev_fence */
        dma_fence_put(&ev_fence->base);
}

int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
                                 struct amdgpu_bo *bo)
{
        struct amdgpu_eviction_fence *ev_fence;
        struct dma_resv *resv = bo->tbo.base.resv;
        int ret;

        if (!resv)
                return 0;

        ret = dma_resv_reserve_fences(resv, 1);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to resv fence space\n");
                return ret;
        }

        spin_lock(&evf_mgr->ev_fence_lock);
        ev_fence = evf_mgr->ev_fence;
        if (ev_fence)
                dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
        spin_unlock(&evf_mgr->ev_fence_lock);

        return 0;
}

void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
                                  struct amdgpu_bo *bo)
{
        struct dma_fence *stub = dma_fence_get_stub();

        dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
                                stub, DMA_RESV_USAGE_BOOKKEEP);
        dma_fence_put(stub);
}

int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
        /* This needs to be done one time per open */
        atomic_set(&evf_mgr->ev_fence_seq, 0);
        evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
        spin_lock_init(&evf_mgr->ev_fence_lock);

        INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
        return 0;
}
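
/*
 * A minimal, illustrative sketch (not part of this driver) of how a caller
 * might lock its BOs with drm_exec and then install a fresh eviction fence
 * through amdgpu_eviction_fence_replace_fence(), following the "caller locks
 * all objects in exec" contract documented above. The helper name
 * replace_ev_fence_example() and the single locked object are assumptions
 * made purely for illustration; real callers lock every BO they track.
 *
 *	static int replace_ev_fence_example(struct amdgpu_fpriv *fpriv,
 *					    struct drm_gem_object *obj)
 *	{
 *		struct drm_exec exec;
 *		int ret;
 *
 *		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *		drm_exec_until_all_locked(&exec) {
 *			ret = drm_exec_lock_obj(&exec, obj);
 *			drm_exec_retry_on_contention(&exec);
 *			if (ret)
 *				goto out_fini;
 *		}
 *
 *		ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
 *	out_fini:
 *		drm_exec_fini(&exec);
 *		return ret;
 *	}
 */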