/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
	if (r)
		return r;

	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
				    unsigned int count)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
		: &p->vm->delayed;
	unsigned int ndw;
	int r;

	/* estimate how many dw we need */
	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	if (p->pages_addr)
		ndw += count * 2;
	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
				     ndw * 4, pool, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;
	return 0;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @resv: reservation object with embedded fence
 * @sync_mode: synchronization mode
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct dma_resv *resv,
				  enum amdgpu_sync_mode sync_mode)
{
	struct amdgpu_sync sync;
	int r;

	r = amdgpu_vm_sdma_alloc_job(p, 0);
	if (r)
		return r;

	if (!resv)
		return 0;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
	if (!r)
		r = amdgpu_sync_push_to_job(&sync, p->job);
	amdgpu_sync_free(&sync);

	if (r) {
		p->num_dw_left = 0;
		amdgpu_job_free(p->job);
	}
	return r;
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct amdgpu_ring *ring;
	struct dma_fence *f;

	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
			    sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);

	if (p->needs_flush)
		atomic64_inc(&p->vm->tlb_seq);

	WARN_ON(ib->length_dw > p->num_dw_left);
	f = amdgpu_job_submit(p->job);

	if (p->unlocked) {
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

	if (fence && !p->immediate) {
		/*
		 * Most hw generations now have a separate queue for page table
		 * updates, but when the queue is shared with userspace we need
		 * the extra CPU round trip to correctly flush the TLB.
		 */
		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
		swap(*fence, f);
	}
	dma_fence_put(f);
	return 0;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, setup mapping buffer on demand and write commands to
 * the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	struct amdgpu_bo *bo = &vmbo->bo;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&p->job->base, fence);
		if (r) {
			dma_fence_put(fence);
			dma_resv_iter_end(&cursor);
			return r;
		}
	}
	dma_resv_iter_end(&cursor);

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			r = amdgpu_vm_sdma_alloc_job(p, count);
			if (r)
				return r;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (vmbo->shadow)
				amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(vmbo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (vmbo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
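
/*
 * Usage note (illustrative sketch, not part of the upstream file): this
 * backend is consumed through the amdgpu_vm_update_funcs interface exported
 * above. Roughly, amdgpu_vm.c selects either this SDMA backend or the CPU
 * backend when a VM is created, and page table updates then flow through
 * prepare()/update()/commit() on an amdgpu_vm_update_params. The exact caller
 * code differs between kernel versions; the snippet below only shows the
 * intended call order and is not a verbatim copy of amdgpu_vm.c.
 *
 *	vm->update_funcs = &amdgpu_vm_sdma_funcs;	// or &amdgpu_vm_cpu_funcs
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev,
 *		.vm = vm,
 *		.immediate = immediate,
 *	};
 *
 *	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, pt, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 */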