/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
	/* xcp partition number plus 1, 0 means any partition */
	int8_t				xcp_id_plus1;
};
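
/*
 * Example (illustrative sketch, not part of this header): a caller would
 * typically zero-initialize an amdgpu_bo_param, fill in the fields it needs
 * and hand it to amdgpu_bo_create(). The concrete values below are
 * assumptions made for the example.
 *
 *	struct amdgpu_bo_param bp = {};
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */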

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;

	/*
	 * protected by vm reservation lock
	 * if non-zero, cannot unmap from GPU because user queues may still
	 * access it
	 */
	unsigned int			queue_refcount;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;

	/*
	 * For GPUs with spatial partitioning, the xcp partition number; -1
	 * means any partition. For other ASICs without spatial partitioning,
	 * always 0, for memory accounting.
	 */
	int8_t				xcp_id;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_vm_bo_base	entries[];
};
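
/*
 * Example (illustrative sketch, not part of this header): amdgpu_bo_vm ends
 * in a flexible array, so a creator must size the containing allocation via
 * amdgpu_bo_param.bo_ptr_size before calling amdgpu_bo_create_vm();
 * num_entries is an assumed variable here.
 *
 *	bp.bo_ptr_size = struct_size((struct amdgpu_bo_vm *)NULL,
 *				     entries, num_entries);
 *	r = amdgpu_bo_create_vm(adev, &bp, &vmbo);
 */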

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	case AMDGPU_PL_DOORBELL:
		return AMDGPU_GEM_DOMAIN_DOORBELL;
	default:
		break;
	}
	return 0;
}
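
/*
 * Example (illustrative sketch, not part of this header): mapping the TTM
 * placement of a BO's current resource back to a GEM domain; bo is assumed
 * to be a valid amdgpu_bo with a populated tbo.resource.
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 *
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		dev_dbg(adev->dev, "BO currently lives in VRAM\n");
 */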

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
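
/*
 * Example (illustrative sketch, not part of this header): the usual
 * reserve/access/unreserve pattern. With no_intr == false the wait is
 * interruptible, so a -ERESTARTSYS return must be propagated back towards
 * user-space.
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	... access state protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */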

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
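
/*
 * Example (illustrative sketch, not part of this header): lifecycle of a
 * kernel-owned BO that is created pinned and CPU-mapped in one call. The
 * variable names are assumptions made for the example.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_addr, 0, PAGE_SIZE);
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
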
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
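
/*
 * Example (illustrative sketch, not part of this header): pinning and
 * CPU-mapping an already created BO. amdgpu_bo_pin(), amdgpu_bo_kmap() and
 * their counterparts expect the BO to be reserved by the caller.
 *
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r) {
 *		r = amdgpu_bo_kmap(bo, &cpu_addr);
 *		if (r)
 *			amdgpu_bo_unpin(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */
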
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats,
			  unsigned int size);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);

/*
 * sub allocation
 */
static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
	return container_of(manager, struct amdgpu_sa_manager, base);
}

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
		drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
		drm_suballoc_soffset(sa_bo);
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct drm_suballoc **sa_bo,
		       struct dma_fence *fence);
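
/*
 * Example (illustrative sketch, not part of this header): sub-allocating a
 * small buffer out of an already initialized amdgpu_sa_manager and releasing
 * it once the GPU is done. write_commands() and submit_at() are hypothetical
 * helpers; fence is assumed to signal when the GPU work has finished.
 *
 *	struct drm_suballoc *sa_bo = NULL;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256);
 *	if (r)
 *		return r;
 *
 *	write_commands(amdgpu_sa_bo_cpu_addr(sa_bo));
 *	submit_at(amdgpu_sa_bo_gpu_addr(sa_bo));
 *
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
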
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif