xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h (revision 20f4eff1c8ba344d5c22234ac5611ff1489fbea6)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the corresponding AMDGPU_GEM_DOMAIN_* flag for the ttm mem_type,
 * or 0 if the type is unknown.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
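
/*
 * Illustrative use (a sketch, not part of this header): mapping a BO's
 * current TTM placement back to a GEM domain, e.g. to report where a buffer
 * currently lives.  example_bo_current_domain() is a hypothetical helper.
 *
 *	static unsigned example_bo_current_domain(struct amdgpu_bo *bo)
 *	{
 *		return amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	}
 */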

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * 0 for success.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
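
/*
 * Illustrative reserve/unreserve pattern (a sketch, not part of this header):
 * a BO must be reserved before its placement or mappings are touched, and
 * every path out of the critical section must unreserve it.  With
 * no_intr == false the wait is interruptible, so -ERESTARTSYS has to be
 * propagated back towards user space.  example_touch_bo() is a hypothetical
 * caller.
 *
 *	static int example_touch_bo(struct amdgpu_bo *bo)
 *	{
 *		int r;
 *
 *		r = amdgpu_bo_reserve(bo, false);
 *		if (r)	// may be -ERESTARTSYS on a pending signal
 *			return r;
 *
 *		// ... operate on the reserved BO here ...
 *
 *		amdgpu_bo_unreserve(bo);
 *		return 0;
 *	}
 */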

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
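
/*
 * Note on units (illustrative, assuming the common case of a 4 KiB PAGE_SIZE
 * and a 4 KiB AMDGPU_GPU_PAGE_SIZE): amdgpu_bo_size() reports bytes, while
 * amdgpu_bo_ngpu_pages() and amdgpu_bo_gpu_page_alignment() convert from CPU
 * pages to GPU pages.  For example, a BO with tbo.num_pages == 4 is
 * 4 << PAGE_SHIFT == 16384 bytes, i.e. 16384 / AMDGPU_GPU_PAGE_SIZE == 4 GPU
 * pages.
 */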

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
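
/*
 * Illustrative use (a sketch, not part of this header): the value returned by
 * amdgpu_bo_mmap_offset() is the fake offset that user space passes to
 * mmap(2) on the DRM file descriptor to map the buffer.
 * example_fill_offset() is a hypothetical helper filling an ioctl reply.
 *
 *	static void example_fill_offset(struct amdgpu_bo *bo, u64 *out_offset)
 *	{
 *		*out_offset = amdgpu_bo_mmap_offset(bo);
 *	}
 */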

int amdgpu_bo_create(struct amdgpu_device *adev,
			    unsigned long size, int byte_align,
			    bool kernel, u32 domain, u64 flags,
			    struct sg_table *sg,
			    struct reservation_object *resv,
			    struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
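
/*
 * Illustrative use of amdgpu_bo_create_kernel() (a sketch, not part of this
 * header): allocate a small kernel-owned scratch buffer in GTT and retrieve
 * its GPU and CPU addresses through the out parameters.  The example_* names
 * are hypothetical and an adev pointer is assumed to be in scope.
 *
 *	struct amdgpu_bo *example_bo;
 *	u64 example_gpu_addr;
 *	void *example_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &example_bo,
 *				    &example_gpu_addr, &example_cpu_ptr);
 *	if (r)
 *		return r;
 */
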
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct fence **fence, bool direct);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct fence **fence,
				  bool direct);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
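
/*
 * Illustrative sub-allocation flow (a sketch, not part of this header): carve
 * a small buffer out of an already initialised amdgpu_sa_manager with
 * amdgpu_sa_bo_new() (declared below) and compute its addresses with the
 * helpers above; both are simply the manager base plus the sub-buffer's
 * offset.  example_mgr and the other example_* names are hypothetical.
 *
 *	struct amdgpu_sa_bo *example_sa_bo;
 *	uint64_t example_gpu_addr;
 *	void *example_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(example_mgr, &example_sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	example_gpu_addr = amdgpu_sa_bo_gpu_addr(example_sa_bo);
 *	example_cpu_ptr = amdgpu_sa_bo_cpu_addr(example_sa_bo);
 */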

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
				     struct amdgpu_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
					struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
			      struct amdgpu_sa_bo **sa_bo,
			      struct fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
					 struct seq_file *m);
#endif


#endif