1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2023 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_seq64.h" 27 28 #include <drm/drm_exec.h> 29 30 /** 31 * DOC: amdgpu_seq64 32 * 33 * amdgpu_seq64 allocates a 64bit memory on each request in sequence order. 34 * seq64 driver is required for user queue fence memory allocation, TLB 35 * counters and VM updates. It has maximum count of 32768 64 bit slots. 36 */ 37 38 /** 39 * amdgpu_seq64_get_va_base - Get the seq64 va base address 40 * 41 * @adev: amdgpu_device pointer 42 * 43 * Returns: 44 * va base address on success 45 */ 46 static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) 47 { 48 u64 addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; 49 50 addr -= AMDGPU_VA_RESERVED_TOP; 51 52 return addr; 53 } 54 55 /** 56 * amdgpu_seq64_map - Map the seq64 memory to VM 57 * 58 * @adev: amdgpu_device pointer 59 * @vm: vm pointer 60 * @bo_va: bo_va pointer 61 * 62 * Map the seq64 memory to the given VM. 63 * 64 * Returns: 65 * 0 on success or a negative error code on failure 66 */ 67 int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, 68 struct amdgpu_bo_va **bo_va) 69 { 70 struct amdgpu_bo *bo; 71 struct drm_exec exec; 72 u64 seq64_addr; 73 int r; 74 75 bo = adev->seq64.sbo; 76 if (!bo) 77 return -EINVAL; 78 79 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 80 drm_exec_until_all_locked(&exec) { 81 r = amdgpu_vm_lock_pd(vm, &exec, 0); 82 if (likely(!r)) 83 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 84 drm_exec_retry_on_contention(&exec); 85 if (unlikely(r)) 86 goto error; 87 } 88 89 *bo_va = amdgpu_vm_bo_add(adev, vm, bo); 90 if (!*bo_va) { 91 r = -ENOMEM; 92 goto error; 93 } 94 95 seq64_addr = amdgpu_seq64_get_va_base(adev); 96 r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE, 97 AMDGPU_PTE_READABLE); 98 if (r) { 99 DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); 100 amdgpu_vm_bo_del(adev, *bo_va); 101 goto error; 102 } 103 104 r = amdgpu_vm_bo_update(adev, *bo_va, false); 105 if (r) { 106 DRM_ERROR("failed to do vm_bo_update on userq sem\n"); 107 amdgpu_vm_bo_del(adev, *bo_va); 108 goto error; 109 } 110 111 error: 112 drm_exec_fini(&exec); 113 return r; 114 } 115 116 /** 117 * amdgpu_seq64_unmap - Unmap the seq64 memory 118 * 119 * @adev: amdgpu_device pointer 120 * @fpriv: DRM file private 121 * 122 * Unmap the seq64 memory from the given VM. 123 */ 124 void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv) 125 { 126 struct amdgpu_vm *vm; 127 struct amdgpu_bo *bo; 128 struct drm_exec exec; 129 int r; 130 131 if (!fpriv->seq64_va) 132 return; 133 134 bo = adev->seq64.sbo; 135 if (!bo) 136 return; 137 138 vm = &fpriv->vm; 139 140 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 141 drm_exec_until_all_locked(&exec) { 142 r = amdgpu_vm_lock_pd(vm, &exec, 0); 143 if (likely(!r)) 144 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 145 drm_exec_retry_on_contention(&exec); 146 if (unlikely(r)) 147 goto error; 148 } 149 150 amdgpu_vm_bo_del(adev, fpriv->seq64_va); 151 152 fpriv->seq64_va = NULL; 153 154 error: 155 drm_exec_fini(&exec); 156 } 157 158 /** 159 * amdgpu_seq64_alloc - Allocate a 64 bit memory 160 * 161 * @adev: amdgpu_device pointer 162 * @va: VA to access the seq in process address space 163 * @cpu_addr: CPU address to access the seq 164 * 165 * Alloc a 64 bit memory from seq64 pool. 166 * 167 * Returns: 168 * 0 on success or a negative error code on failure 169 */ 170 int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr) 171 { 172 unsigned long bit_pos; 173 174 bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem); 175 if (bit_pos >= adev->seq64.num_sem) 176 return -ENOSPC; 177 178 __set_bit(bit_pos, adev->seq64.used); 179 *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev); 180 *cpu_addr = bit_pos + adev->seq64.cpu_base_addr; 181 182 return 0; 183 } 184 185 /** 186 * amdgpu_seq64_free - Free the given 64 bit memory 187 * 188 * @adev: amdgpu_device pointer 189 * @va: gpu start address to be freed 190 * 191 * Free the given 64 bit memory from seq64 pool. 192 */ 193 void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va) 194 { 195 unsigned long bit_pos; 196 197 bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64); 198 if (bit_pos < adev->seq64.num_sem) 199 __clear_bit(bit_pos, adev->seq64.used); 200 } 201 202 /** 203 * amdgpu_seq64_fini - Cleanup seq64 driver 204 * 205 * @adev: amdgpu_device pointer 206 * 207 * Free the memory space allocated for seq64. 208 * 209 */ 210 void amdgpu_seq64_fini(struct amdgpu_device *adev) 211 { 212 amdgpu_bo_free_kernel(&adev->seq64.sbo, 213 NULL, 214 (void **)&adev->seq64.cpu_base_addr); 215 } 216 217 /** 218 * amdgpu_seq64_init - Initialize seq64 driver 219 * 220 * @adev: amdgpu_device pointer 221 * 222 * Allocate the required memory space for seq64. 223 * 224 * Returns: 225 * 0 on success or a negative error code on failure 226 */ 227 int amdgpu_seq64_init(struct amdgpu_device *adev) 228 { 229 int r; 230 231 if (adev->seq64.sbo) 232 return 0; 233 234 /* 235 * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS 236 * 64bit slots 237 */ 238 r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE, 239 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 240 &adev->seq64.sbo, NULL, 241 (void **)&adev->seq64.cpu_base_addr); 242 if (r) { 243 dev_warn(adev->dev, "(%d) create seq64 failed\n", r); 244 return r; 245 } 246 247 memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE); 248 249 adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS; 250 memset(&adev->seq64.used, 0, sizeof(adev->seq64.used)); 251 252 return 0; 253 } 254