// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_seq64.h"

#include <drm/drm_exec.h>

/**
 * DOC: amdgpu_seq64
 *
 * amdgpu_seq64 allocates one 64-bit memory slot per request, in sequence
 * order. The seq64 driver is required for user queue fence memory
 * allocation, TLB counters and VM updates. It provides a maximum of
 * 32768 64-bit slots.
 */
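
/*
 * Layout sketch (illustrative, derived from the helpers below): every slot
 * is a single u64, so the CPU, GPU and per-process addresses of slot N are
 * all the same fixed offset from their respective base addresses:
 *
 *	offset   = N * sizeof(u64);
 *	cpu_addr = adev->seq64.cpu_base_addr + N;	// u64 pointer arithmetic
 *	gpu_addr = adev->seq64.gpu_addr + offset;
 *	va       = amdgpu_seq64_get_va_base(adev) + offset;
 */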

/**
 * amdgpu_seq64_get_va_base - Get the seq64 va base address
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * va base address on success
 */
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
	u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);

	addr = amdgpu_gmc_sign_extend(addr);

	return addr;
}

/**
 * amdgpu_seq64_map - Map the seq64 memory to VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm pointer
 * @bo_va: bo_va pointer
 *
 * Map the seq64 memory to the given VM.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct amdgpu_bo_va **bo_va)
{
	u64 seq64_addr, va_flags;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	bo = adev->seq64.sbo;
	if (!bo)
		return -EINVAL;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		r = -ENOMEM;
		goto error;
	}

	seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;

	va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE |
					   AMDGPU_VM_MTYPE_UC);
	r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
			     AMDGPU_VA_RESERVED_SEQ64_SIZE, va_flags);
	if (r) {
		DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, *bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on userq sem\n");
		amdgpu_vm_bo_del(adev, *bo_va);
		goto error;
	}

error:
	drm_exec_fini(&exec);
	return r;
}
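
/*
 * Usage sketch (illustrative; the surrounding driver plumbing is an
 * assumption, not part of this file): the mapping is typically established
 * once per DRM file in the open path, caching the bo_va in the file
 * private so it can be torn down later:
 *
 *	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 *	int r;
 *
 *	r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
 *	if (r)
 *		return r;
 */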

/**
 * amdgpu_seq64_unmap - Unmap the seq64 memory
 *
 * @adev: amdgpu_device pointer
 * @fpriv: DRM file private
 *
 * Unmap the seq64 memory from the given VM.
 */
void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	int r;

	if (!fpriv->seq64_va)
		return;

	bo = adev->seq64.sbo;
	if (!bo)
		return;

	vm = &fpriv->vm;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	amdgpu_vm_bo_del(adev, fpriv->seq64_va);

	fpriv->seq64_va = NULL;

error:
	drm_exec_fini(&exec);
}
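
/*
 * Usage sketch (illustrative, mirroring the map example above): since the
 * bo_va belongs to fpriv->vm, the unmap must run while that VM is still
 * alive, typically from the file-close path before the VM is finalized:
 *
 *	amdgpu_seq64_unmap(adev, fpriv);
 *	... // VM teardown follows
 */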

/**
 * amdgpu_seq64_alloc - Allocate a 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @va: VA to access the seq in process address space
 * @gpu_addr: GPU address to access the seq
 * @cpu_addr: CPU address to access the seq
 *
 * Allocate a 64-bit slot from the seq64 pool.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
		       u64 *gpu_addr, u64 **cpu_addr)
{
	unsigned long bit_pos;

	bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
	if (bit_pos >= adev->seq64.num_sem)
		return -ENOSPC;

	__set_bit(bit_pos, adev->seq64.used);

	*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);

	if (gpu_addr)
		*gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;

	*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;

	return 0;
}
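
/*
 * Usage sketch (illustrative; the fence-emission details are assumptions,
 * not part of this file): a caller grabs a slot, lets the GPU write a
 * sequence value through the returned GPU address, reads it back through
 * the CPU address, and frees the slot by its VA when done:
 *
 *	u64 seq_va, seq_gpu_addr;
 *	u64 *seq_cpu_addr;
 *	int r;
 *
 *	r = amdgpu_seq64_alloc(adev, &seq_va, &seq_gpu_addr, &seq_cpu_addr);
 *	if (r)
 *		return r;
 *
 *	// GPU writes the fence value at seq_gpu_addr; the CPU can poll it:
 *	// while (READ_ONCE(*seq_cpu_addr) < wanted_seq) ...
 *
 *	amdgpu_seq64_free(adev, seq_va);
 */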

/**
 * amdgpu_seq64_free - Free the given 64-bit memory slot
 *
 * @adev: amdgpu_device pointer
 * @va: seq64 VA (as returned by amdgpu_seq64_alloc()) of the slot to be freed
 *
 * Free the given 64-bit slot back to the seq64 pool.
 */
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
{
	unsigned long bit_pos;

	bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
	if (bit_pos < adev->seq64.num_sem)
		__clear_bit(bit_pos, adev->seq64.used);
}

/**
 * amdgpu_seq64_fini - Cleanup seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Free the memory space allocated for seq64.
 */
void amdgpu_seq64_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->seq64.sbo,
			      NULL,
			      (void **)&adev->seq64.cpu_base_addr);
}

/**
 * amdgpu_seq64_init - Initialize seq64 driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the required memory space for seq64.
 *
 * Returns:
 * 0 on success or a negative error code on failure
 */
int amdgpu_seq64_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->seq64.sbo)
		return 0;

	/*
	 * AMDGPU_VA_RESERVED_SEQ64_SIZE covers AMDGPU_MAX_SEQ64_SLOTS
	 * 64-bit slots, i.e. AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) bytes.
	 */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->seq64.sbo, &adev->seq64.gpu_addr,
				    (void **)&adev->seq64.cpu_base_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
		return r;
	}

	memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);

	adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
	memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));

	return 0;
}
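
/*
 * Size check (a worked example under the assumption, stated in the DOC
 * comment above, that AMDGPU_MAX_SEQ64_SLOTS is 32768): the backing BO is
 * 32768 slots * 8 bytes per u64 = 256 KiB of GTT, allocated once by
 * amdgpu_seq64_init() and released by amdgpu_seq64_fini().
 */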