1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2023 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_seq64.h" 27 28 #include <drm/drm_exec.h> 29 30 /** 31 * DOC: amdgpu_seq64 32 * 33 * amdgpu_seq64 allocates a 64bit memory on each request in sequence order. 34 * seq64 driver is required for user queue fence memory allocation, TLB 35 * counters and VM updates. It has maximum count of 32768 64 bit slots. 
 */

/**
 * amdgpu_seq64_get_va_base - Get the seq64 va base address
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * va base address on success
 */
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
	return AMDGPU_VA_RESERVED_SEQ64_START(adev);
}

/**
 * amdgpu_seq64_map - Map the seq64 memory to VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm pointer
 * @bo_va: bo_va pointer
 *
 * Map the seq64 memory to the given VM.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct amdgpu_bo_va **bo_va)
{
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	u64 seq64_addr;
	int r;

	/* Nothing to map if the global seq64 BO was never created */
	bo = adev->seq64.sbo;
	if (!bo)
		return -EINVAL;

	/*
	 * Lock the VM's page directory and the seq64 BO together.
	 * drm_exec_retry_on_contention() restarts the whole loop body on
	 * contention, so both locks are reacquired atomically from the
	 * caller's point of view.
	 */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error;
	}

	/*
	 * Map the whole reserved seq64 range at the fixed per-VM VA.
	 * Only AMDGPU_PTE_READABLE is set, so the mapping is read-only
	 * from the GPU side of this VM.
	 */
	seq64_addr = amdgpu_seq64_get_va_base(adev);
	r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
			     AMDGPU_PTE_READABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, *bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on userq sem\n");
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

	/*
	 * Success falls through here with r == 0; the label only drops
	 * the locks taken by drm_exec above.
	 */
error:
	drm_exec_fini(&exec);
	return r;
}

/**
 * amdgpu_seq64_unmap - Unmap the seq64 memory
 *
 * @adev: amdgpu_device pointer
 * @fpriv: DRM file private
 *
 * Unmap the seq64 memory from the given VM.
 */
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	/* Nothing mapped for this file, or no seq64 BO at all */
	if (!fpriv->seq64_va)
		return;

	bo = adev->seq64.sbo;
	if (!bo)
		return;

	vm = &fpriv->vm;

	/*
	 * Same lock-retry pattern as amdgpu_seq64_map(): take the page
	 * directory and the seq64 BO lock, restarting on contention.
	 * NOTE(review): if locking fails here the bo_va is left in place
	 * and fpriv->seq64_va stays set — presumably acceptable on the
	 * teardown path; confirm against callers.
	 */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	amdgpu_vm_bo_del(adev, fpriv->seq64_va);

	fpriv->seq64_va = NULL;

error:
	drm_exec_fini(&exec);
}

/**
 * amdgpu_seq64_alloc - Allocate a 64 bit memory
 *
 * @adev: amdgpu_device pointer
 * @va: VA to access the seq in process address space
 * @cpu_addr: CPU address to access the seq
 *
 * Alloc a 64 bit memory from seq64 pool.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
{
	unsigned long bit_pos;

	/*
	 * Each bit in the 'used' bitmap tracks one 64-bit slot.
	 * NOTE(review): no lock is visible around the find/set pair here;
	 * presumably callers serialize allocation — confirm, otherwise two
	 * concurrent callers could claim the same slot.
	 */
	bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
	if (bit_pos >= adev->seq64.num_sem)
		return -ENOSPC;

	__set_bit(bit_pos, adev->seq64.used);
	/* GPU VA of the slot: base + slot index * 8 bytes */
	*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
	/* cpu_base_addr is a u64 *, so this indexes the bit_pos-th slot */
	*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;

	return 0;
}

/**
 * amdgpu_seq64_free - Free the given 64 bit memory
 *
 * @adev: amdgpu_device pointer
 * @va: gpu start address to be freed
 *
 * Free the given 64 bit memory from seq64 pool.
 */
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
{
	unsigned long bit_pos;

	/* Recover the slot index from the GPU VA; out-of-range addresses
	 * (including VAs that were never handed out by seq64_alloc) are
	 * silently ignored.
	 */
	bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
	if (bit_pos < adev->seq64.num_sem)
		__clear_bit(bit_pos, adev->seq64.used);
}

/**
 * amdgpu_seq64_fini - Cleanup seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Free the memory space allocated for seq64.
 *
 */
void amdgpu_seq64_fini(struct amdgpu_device *adev)
{
	/* Frees the BO and NULLs sbo/cpu_base_addr; safe if never inited */
	amdgpu_bo_free_kernel(&adev->seq64.sbo,
			      NULL,
			      (void **)&adev->seq64.cpu_base_addr);
}

/**
 * amdgpu_seq64_init - Initialize seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the required memory space for seq64.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_init(struct amdgpu_device *adev)
{
	int r;

	/* Idempotent: already initialized */
	if (adev->seq64.sbo)
		return 0;

	/*
	 * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS
	 * 64bit slots
	 */
	/* Kernel-pinned GTT BO with a permanent CPU mapping in cpu_base_addr */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->seq64.sbo, NULL,
				    (void **)&adev->seq64.cpu_base_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
		return r;
	}

	memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);

	adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
	/* Mark all slots free (used is presumably a fixed-size bitmap array,
	 * hence sizeof works here — confirm against the struct definition)
	 */
	memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));

	return 0;
}