/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

/**
 * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
	table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	return amdgpu_bo_kmap(&table->bo, NULL);
}

/**
 * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @sync: sync obj with fences to wait on
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
				 struct amdgpu_sync *sync)
{
	if (!sync)
		return 0;

	return amdgpu_sync_wait(sync, true);
}

/**
 * amdgpu_vm_cpu_update - helper to update page tables via CPU
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
				struct amdgpu_bo_vm *vmbo, uint64_t pe,
				uint64_t addr, unsigned count, uint32_t incr,
				uint64_t flags)
{
	unsigned int i;
	uint64_t value;
	long r;

	/* Wait for pending kernel work (moves, clears) on the PD/PT first */
	r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  true, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	/* Turn the byte offset into a CPU pointer into the kmapped PD/PT */
	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);

	for (i = 0; i < count; i++) {
		/* Resolve the address through the GART table when one is provided */
		value = p->pages_addr ?
			amdgpu_vm_map_gart(p->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
	return 0;
}

/**
 * amdgpu_vm_cpu_commit - commit page table update to the HW
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: unused
 *
 * Make sure that the hardware sees the page table updates.
 *
 * Returns:
 * Always 0.
 */
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
				struct dma_fence **fence)
{
	/* Bumping the sequence signals that a TLB flush is needed for this VM */
	if (p->needs_flush)
		atomic64_inc(&p->vm->tlb_seq);

	/* Make sure the CPU writes have landed before flushing HDP */
	mb();
	amdgpu_device_flush_hdp(p->adev, NULL);
	return 0;
}

/* Backend that writes page table entries directly with the CPU */
const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
	.map_table = amdgpu_vm_cpu_map_table,
	.prepare = amdgpu_vm_cpu_prepare,
	.update = amdgpu_vm_cpu_update,
	.commit = amdgpu_vm_cpu_commit
};
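/*
 * Illustrative sketch only: the VM init path chooses this backend over the
 * SDMA one when the VM is configured for CPU-based page table updates,
 * roughly as below (field and symbol names as used elsewhere in amdgpu_vm):
 *
 *	if (vm->use_cpu_for_update)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 */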