xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #ifndef __AMDGPU_TTM_H__
25 #define __AMDGPU_TTM_H__
26 
27 #include <linux/dma-direction.h>
28 #include <drm/gpu_scheduler.h>
29 #include <drm/ttm/ttm_placement.h>
30 #include "amdgpu_vram_mgr.h"
31 #include "amdgpu_hmm.h"
32 #include "amdgpu_gmc.h"
33 
/* amdgpu-private TTM placements, stacked on top of the standard TTM ones */
#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT	(TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL	(TTM_PL_PRIV + 4)
#define AMDGPU_PL_MMIO_REMAP	(TTM_PL_PRIV + 5)
/* Total placement count, including TTM's built-in placements */
#define __AMDGPU_PL_NUM	(TTM_PL_PRIV + 6)

/* Size of one GART transfer window (presumably in pages — confirm in amdgpu_ttm.c) */
#define AMDGPU_GTT_MAX_TRANSFER_SIZE	1024

/* sysfs attribute groups, defined by the respective resource managers */
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;

struct hmm_range;
48 
/**
 * struct amdgpu_gtt_mgr - TTM resource manager for the GTT domain
 * @manager: base TTM resource manager
 * @mm: range allocator for GART address space
 * @lock: spinlock, presumably protecting @mm — confirm against amdgpu_gtt_mgr.c
 */
struct amdgpu_gtt_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};
54 
/**
 * struct amdgpu_ttm_buffer_entity - scheduler entity for SDMA buffer operations
 * @base: embedded drm_sched_entity used for job submission
 * @lock: mutex serializing use of this entity
 * @gart_node: GTT allocation backing this entity's GART windows
 * @gart_window_offs: offsets of the two GART windows (see
 *	amdgpu_compute_gart_address(), which adds them to gmc->gart_start)
 */
struct amdgpu_ttm_buffer_entity {
	struct drm_sched_entity base;
	struct mutex		lock;
	struct drm_mm_node	gart_node;
	u64			gart_window_offs[2];
};
61 
/**
 * enum amdgpu_resv_region_id - indexes into amdgpu_mman.resv_region[]
 *
 * Identifies the VRAM regions that the driver carves out before handing the
 * rest of VRAM to the resource manager (stolen VGA/firmware memory, firmware
 * and driver usage regions, memory-training scratch).
 */
enum amdgpu_resv_region_id {
	AMDGPU_RESV_STOLEN_VGA,
	AMDGPU_RESV_STOLEN_EXTENDED,
	AMDGPU_RESV_STOLEN_RESERVED,
	AMDGPU_RESV_FW,
	AMDGPU_RESV_FW_EXTEND,
	AMDGPU_RESV_FW_VRAM_USAGE,
	AMDGPU_RESV_DRV_VRAM_USAGE,
	AMDGPU_RESV_MEM_TRAIN,
	AMDGPU_RESV_MAX		/* number of regions; array size, not a valid id */
};
73 
/**
 * struct amdgpu_vram_resv - one reserved VRAM region
 * @offset: start of the region in VRAM, in bytes
 * @size: size of the region, in bytes
 * @bo: buffer object pinned over the region once it has been reserved
 * @cpu_ptr: CPU mapping of @bo; only valid when @needs_cpu_map is set
 * @needs_cpu_map: whether the region must be CPU-accessible
 */
struct amdgpu_vram_resv {
	uint64_t		offset;
	uint64_t		size;
	struct amdgpu_bo	*bo;
	void			*cpu_ptr;
	bool			needs_cpu_map;
};
81 
/**
 * struct amdgpu_mman - top-level memory-management state for one device
 *
 * Embeds the TTM device plus the per-domain resource managers and the
 * scheduler entities used for SDMA-based buffer clears and moves.
 */
struct amdgpu_mman {
	/* TTM device instance this driver registers */
	struct ttm_device		bdev;
	/* extra TTM pools (presumably per-partition — confirm in amdgpu_ttm.c) */
	struct ttm_pool			*ttm_pools;
	/* set once amdgpu_ttm_init() has completed */
	bool				initialized;
	/* CPU kernel mapping of the visible VRAM aperture, if mapped */
	void __iomem			*aper_base_kaddr;

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
	/* toggled by amdgpu_ttm_set_buffer_funcs_status() */
	bool					buffer_funcs_enabled;

	/* @default_entity: for workarounds, has no gart windows */
	struct amdgpu_ttm_buffer_entity default_entity;
	/* entities for buffer clears; @next_clear_entity is an atomic cursor
	 * used by amdgpu_ttm_next_clear_entity() to pick among them */
	struct amdgpu_ttm_buffer_entity *clear_entities;
	atomic_t next_clear_entity;
	u32 num_clear_entities;
	/* entities for buffer moves; @next_move_entity is the analogous cursor */
	struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
	atomic_t next_move_entity;
	u32 num_move_entities;

	/* per-domain resource managers */
	struct amdgpu_vram_mgr vram_mgr;
	struct amdgpu_gtt_mgr gtt_mgr;
	struct ttm_resource_manager preempt_mgr;

	/* keep the stolen VGA BO alive instead of freeing it after init */
	bool			keep_stolen_vga_memory;

	/* reserved VRAM regions, indexed by enum amdgpu_resv_region_id */
	struct amdgpu_vram_resv		resv_region[AMDGPU_RESV_MAX];

	/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
	struct amdgpu_bo	*sdma_access_bo;
	void			*sdma_access_ptr;
};
114 
/**
 * struct amdgpu_copy_mem - one endpoint (source or destination) of a copy
 * @bo: buffer object involved in the copy
 * @mem: backing resource to read from / write to
 * @offset: offset into @mem where the copy starts
 */
struct amdgpu_copy_mem {
	struct ttm_buffer_object	*bo;
	struct ttm_resource		*mem;
	unsigned long			offset;
};
120 
/*
 * Flags for amdgpu_copy_buffer(), packed into a single u32:
 *   bit  0      - TMZ (trusted memory zone) copy
 *   bit  1      - read side is decompressed
 *   bit  2      - write side is compressed
 *   bits 3-4    - MAX_COMPRESSED field
 *   bits 5-7    - NUMBER_TYPE field
 *   bits 8-13   - DATA_FORMAT field
 *   bit  14     - WRITE_COMPRESS_DISABLE field
 */
#define AMDGPU_COPY_FLAGS_TMZ		(1 << 0)
#define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED	(1 << 1)
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED	(1 << 2)
#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_SHIFT		3
#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_MASK		0x03
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_SHIFT		5
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK		0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT		8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK		0x3f
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT	14
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK	0x1

/* Encode @value into the multi-bit @field (value is masked, then shifted) */
#define AMDGPU_COPY_FLAGS_SET(field, value) \
	(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
/* Extract the multi-bit @field from packed copy flags @value */
#define AMDGPU_COPY_FLAGS_GET(value, field) \
	(((__u32)(value) >> AMDGPU_COPY_FLAGS_##field##_SHIFT) & AMDGPU_COPY_FLAGS_##field##_MASK)
137 
/* Per-domain resource manager setup/teardown (GTT, preemptible, VRAM) */
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);

/* GTT manager helpers */
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);

/* Allocate/free drm_mm nodes directly in the GTT manager's address space */
int amdgpu_gtt_mgr_alloc_entries(struct amdgpu_gtt_mgr *mgr,
				 struct drm_mm_node *mm_node,
				 u64 num_pages,
				 enum drm_mm_insert_mode mode);
void amdgpu_gtt_mgr_free_entries(struct amdgpu_gtt_mgr *mgr,
				 struct drm_mm_node *mm_node);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);

/* VRAM manager helpers: visibility, DMA-buf sg-tables, reservations */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      u64 offset, u64 size,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt);
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start);
void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);

bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res);

/* Reserved-VRAM region management (see enum amdgpu_resv_region_id) */
void amdgpu_ttm_init_vram_resv(struct amdgpu_device *adev,
				enum amdgpu_resv_region_id id,
				uint64_t offset, uint64_t size,
				bool needs_cpu_map);
int amdgpu_ttm_mark_vram_reserved(struct amdgpu_device *adev,
				  enum amdgpu_resv_region_id id);
void amdgpu_ttm_unmark_vram_reserved(struct amdgpu_device *adev,
				     enum amdgpu_resv_region_id id);
184 
/* TTM device lifecycle */
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);

/* SDMA-based buffer operations (copy, clear, fill) */
int amdgpu_copy_buffer(struct amdgpu_device *adev,
		       struct amdgpu_ttm_buffer_entity *entity,
		       uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence,
		       bool vm_needs_flush, uint32_t copy_flags);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
			    struct dma_resv *resv,
			    struct dma_fence **fence);
int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
		       struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **f,
		       u64 k_job_id);
/* Pick the next clear entity from amdgpu_mman.clear_entities */
struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);

/* GART binding helpers */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
210 
/*
 * Userptr page pinning: the real implementation is only built with
 * CONFIG_DRM_AMDGPU_USERPTR; without it, a stub rejects all userptr
 * requests with -EPERM.
 */
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
				 struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
					       struct amdgpu_hmm_range *range)
{
	/* Userptr BOs are unsupported in this configuration */
	return -EPERM;
}
#endif
221 
222 /**
223  * amdgpu_compute_gart_address() - Returns GART address of an entity's window
224  * @gmc: The &struct amdgpu_gmc instance to use
225  * @entity: The &struct amdgpu_ttm_buffer_entity owning the GART window
226  * @index: The window to use (must be 0 or 1)
227  */
228 static inline u64 amdgpu_compute_gart_address(struct amdgpu_gmc *gmc,
229 					      struct amdgpu_ttm_buffer_entity *entity,
230 					      int index)
231 {
232 	return gmc->gart_start + entity->gart_window_offs[index];
233 }
234 
235 /**
236  * amdgpu_gtt_node_to_byte_offset() - Returns a byte offset of a gtt node
237  */
238 static inline u64 amdgpu_gtt_node_to_byte_offset(const struct drm_mm_node *gtt_node)
239 {
240 	return gtt_node->start * (u64)PAGE_SIZE;
241 }
242 
/* Userptr state queries and TTM page-table flag helpers */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem);
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);

void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
261 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
262 
263 int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
264 				    struct ttm_resource *res,
265 				    struct device *dev,
266 				    enum dma_data_direction dir,
267 				    struct sg_table **sgt);
268 void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
269 				    enum dma_data_direction dir,
270 				    struct sg_table *sgt);
271 
272 #endif
273