// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/sched.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"

/* dma_fence_ops::get_driver_name callback. */
static const char *
amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu_eviction_fence";
}

/*
 * dma_fence_ops::get_timeline_name callback.
 *
 * The timeline name is the comm of the task which created the fence,
 * captured in amdgpu_evf_mgr_rearm().
 */
static const char *
amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_eviction_fence *ef;

	ef = container_of(f, struct amdgpu_eviction_fence, base);
	return ef->timeline_name;
}

/*
 * dma_fence_ops::enable_signaling callback.
 *
 * Somebody started waiting on the eviction fence; kick off the suspend
 * worker, which suspends the user queues and then signals the fence.
 */
static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_eviction_fence *ev_fence = to_ev_fence(f);

	schedule_work(&ev_fence->evf_mgr->suspend_work);
	return true;
}

static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
	.get_driver_name = amdgpu_eviction_fence_get_driver_name,
	.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
	.enable_signaling = amdgpu_eviction_fence_enable_signaling,
};

/*
 * Worker which evicts the user queues and then signals the current
 * eviction fence. Scheduled by amdgpu_eviction_fence_enable_signaling().
 */
static void
amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
{
	struct amdgpu_eviction_fence_mgr *evf_mgr =
		container_of(work, struct amdgpu_eviction_fence_mgr,
			     suspend_work);
	struct amdgpu_fpriv *fpriv =
		container_of(evf_mgr, struct amdgpu_fpriv, evf_mgr);
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct dma_fence *ev_fence;
	bool cookie;

	mutex_lock(&uq_mgr->userq_mutex);

	/*
	 * This is intentionally after taking the userq_mutex since we do
	 * allocate memory while holding this lock, but only after ensuring that
	 * the eviction fence is signaled.
	 */
	cookie = dma_fence_begin_signalling();

	ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
	/*
	 * NOTE(review): the second argument presumably selects whether the
	 * queues may resume after the eviction (suppressed on shutdown) —
	 * confirm against amdgpu_userq_evict().
	 */
	amdgpu_userq_evict(uq_mgr, !evf_mgr->shutdown);

	/*
	 * Signaling the eviction fence must be done while holding the
	 * userq_mutex. Otherwise we won't resume the queues before issuing the
	 * next fence.
	 */
	dma_fence_signal(ev_fence);
	dma_fence_end_signalling(cookie);
	dma_fence_put(ev_fence);
	mutex_unlock(&uq_mgr->userq_mutex);
}

/**
 * amdgpu_evf_mgr_attach_fence - attach the current eviction fence to a BO
 * @evf_mgr: the eviction fence manager
 * @bo: the buffer object to attach the fence to
 *
 * If the manager's current eviction fence is not signaled yet, validate
 * @bo into its allowed domains and add the fence to the BO's reservation
 * object with DMA_RESV_USAGE_BOOKKEEP usage. If the fence is already
 * signaled there is nothing to do.
 *
 * Returns: 0 on success or if the fence was already signaled, otherwise
 * the error code from ttm_bo_validate().
 */
int amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
				struct amdgpu_bo *bo)
{
	struct dma_fence *ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
	struct ttm_operation_ctx ctx = { false, false };
	struct dma_resv *resv = bo->tbo.base.resv;
	int ret;

	if (!dma_fence_is_signaled(ev_fence)) {

		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (!ret)
			dma_resv_add_fence(resv, ev_fence,
					   DMA_RESV_USAGE_BOOKKEEP);
	} else {
		ret = 0;
	}

	/* Drop the reference taken by amdgpu_evf_mgr_get_fence() */
	dma_fence_put(ev_fence);
	return ret;
}

/**
 * amdgpu_evf_mgr_rearm - issue a new eviction fence
 * @evf_mgr: the eviction fence manager
 * @exec: drm_exec context holding the locked BOs to attach the fence to
 *
 * Allocate and initialize a fresh eviction fence on the manager's fence
 * context, make it the manager's current fence (dropping the reference to
 * the previous one) and attach it to every object locked in @exec.
 *
 * Returns: 0 on success, -ENOMEM if allocating the fence failed.
 */
int amdgpu_evf_mgr_rearm(struct amdgpu_eviction_fence_mgr *evf_mgr,
			 struct drm_exec *exec)
{
	struct amdgpu_eviction_fence *ev_fence;
	struct drm_gem_object *obj;
	unsigned long index;

	/* Create and initialize a new eviction fence */
	ev_fence = kzalloc_obj(*ev_fence);
	if (!ev_fence)
		return -ENOMEM;

	ev_fence->evf_mgr = evf_mgr;
	/* Use the current task's comm as the fence's timeline name */
	get_task_comm(ev_fence->timeline_name, current);
	spin_lock_init(&ev_fence->lock);
	dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
			 &ev_fence->lock, evf_mgr->ev_fence_ctx,
			 atomic_inc_return(&evf_mgr->ev_fence_seq));

	/* Remember it for newly added BOs */
	dma_fence_put(evf_mgr->ev_fence);
	evf_mgr->ev_fence = &ev_fence->base;

	/* And add it to all existing BOs */
	drm_exec_for_each_locked_object(exec, index, obj) {
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		/*
		 * NOTE(review): the return value of
		 * amdgpu_evf_mgr_attach_fence() is ignored here — verify that
		 * a failed validate on one BO is acceptable at this point.
		 */
		amdgpu_evf_mgr_attach_fence(evf_mgr, bo);
	}
	return 0;
}

/**
 * amdgpu_evf_mgr_detach_fence - remove eviction fences from a BO
 * @evf_mgr: the eviction fence manager
 * @bo: the buffer object to remove the fences from
 *
 * Replace all fences of the manager's fence context on the BO's
 * reservation object with a signaled stub fence.
 */
void amdgpu_evf_mgr_detach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
				 struct amdgpu_bo *bo)
{
	struct dma_fence *stub = dma_fence_get_stub();

	dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
				stub, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(stub);
}

/**
 * amdgpu_evf_mgr_init - initialize an eviction fence manager
 * @evf_mgr: the eviction fence manager to initialize
 *
 * Allocate a fence context for the manager, start the sequence counter at
 * zero and install an already-signaled stub as the initial current fence,
 * so that users always have a valid fence pointer to work with.
 */
void amdgpu_evf_mgr_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	atomic_set(&evf_mgr->ev_fence_seq, 0);
	evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
	evf_mgr->ev_fence = dma_fence_get_stub();

	INIT_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
}

/**
 * amdgpu_evf_mgr_shutdown - shut down an eviction fence manager
 * @evf_mgr: the eviction fence manager to shut down
 *
 * Mark the manager as shutting down so the suspend worker evicts the
 * queues for good, then wait for any pending suspend work to finish.
 */
void amdgpu_evf_mgr_shutdown(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	evf_mgr->shutdown = true;
	/* Make sure that the shutdown is visible to the suspend work */
	flush_work(&evf_mgr->suspend_work);
}

/**
 * amdgpu_evf_mgr_flush_suspend - wait for a pending suspend to complete
 * @evf_mgr: the eviction fence manager
 *
 * Wait (uninterruptibly) for the current eviction fence to signal and for
 * the suspend worker to finish.
 *
 * NOTE(review): the rcu_dereference_protected(..., true) condition skips
 * the lockdep check — presumably callers serialize against
 * amdgpu_evf_mgr_rearm(); confirm.
 */
void amdgpu_evf_mgr_flush_suspend(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	dma_fence_wait(rcu_dereference_protected(evf_mgr->ev_fence, true),
		       false);
	/* Make sure that we are done with the last suspend work */
	flush_work(&evf_mgr->suspend_work);
}

/**
 * amdgpu_evf_mgr_fini - tear down an eviction fence manager
 * @evf_mgr: the eviction fence manager to tear down
 *
 * Drop the manager's reference to its current eviction fence.
 */
void amdgpu_evf_mgr_fini(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	dma_fence_put(evf_mgr->ev_fence);
}