/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
};

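/*
 * Illustrative sketch (not part of the header itself): a typical way to fill
 * in an amdgpu_bo_param before calling amdgpu_bo_create().  The size, domain
 * and alignment values below are assumptions chosen for the example only.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 4096;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */
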
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct list_head		shadow_list;
	struct amdgpu_vm_bo_base	entries[];
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

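/*
 * Illustrative sketch of the usual reserve/unreserve pattern; the operation
 * performed while the reservation is held (amdgpu_bo_kmap() here) is only an
 * example, and -ERESTARTSYS is propagated so the ioctl can be restarted:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	amdgpu_bo_unreserve(bo);
 *	if (r)
 *		return r;
 */
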
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
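
/*
 * Illustrative sketch of the amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel()
 * pair, which hands back a pinned and CPU-mapped BO for kernel-internal use.
 * The size, alignment and domain below are assumptions for the example:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use gpu_addr / cpu_ptr ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
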
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
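
/*
 * Illustrative sketch of pinning: the BO has to be reserved around
 * amdgpu_bo_pin() and amdgpu_bo_unpin(); the VRAM domain is an assumption
 * chosen for the example:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */
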
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
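
/*
 * Illustrative sketch of the sub-allocator: carve a small piece out of an
 * already initialised amdgpu_sa_manager and release it once the given fence
 * signals.  The size and alignment values are assumptions for the example:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *
 *	... emit commands at amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
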
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif