/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <drm/drmP.h>
#include "amdgpu.h"

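/**
 * struct amdgpu_gtt_mgr - private data for the GTT memory manager
 *
 * @mm: DRM MM range allocator for the GTT address space
 * @lock: protects @mm and @available
 * @available: number of GTT pages not yet handed out to nodes
 */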
struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	uint64_t available;
};

/**
 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of GTT
 *
 * Allocate and initialize the GTT manager.
 */
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
			       unsigned long p_size)
{
	struct amdgpu_gtt_mgr *mgr;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	mgr->available = p_size;
	man->priv = mgr;
	return 0;
}

/**
 * amdgpu_gtt_mgr_fini - free and destroy GTT manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the GTT manager, returns -EBUSY if ranges are still
 * allocated inside it.
 */
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	if (!drm_mm_clean(&mgr->mm)) {
		spin_unlock(&mgr->lock);
		return -EBUSY;
	}

	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;
	return 0;
}

/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.
 */
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *tbo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	/* The node already has a real GTT offset, nothing to do */
	if (node->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* Limit the search range to the placement restrictions, if any */
	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = man->size;

	/* Allocate from the top of the range when TOPDOWN is requested */
	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, node,
					mem->num_pages, mem->page_alignment, 0,
					fpfn, lpfn, mode);
	spin_unlock(&mgr->lock);

	if (!r) {
		mem->start = node->start;
		/* Update the BO's GPU offset if this is its current placement */
		if (&tbo->mem == mem)
			tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
			    tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
	}

	return r;
}

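/**
 * amdgpu_gtt_mgr_print - print GTT usage statistics
 *
 * @m: seq_file to print into
 * @man: TTM memory type manager
 *
 * Dump the manager size, the number of available pages and the current
 * GTT usage into a seq_file.
 */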
void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;

	seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
		   man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
}

/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate a node for the buffer, but only assign an actual GTT address if
 * the placement carries range or top-down restrictions; otherwise the
 * address space is reserved later in amdgpu_gtt_mgr_alloc().
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node;
	int r;

	/* Bail out (mm_node stays NULL) if not enough GTT pages are left */
	spin_lock(&mgr->lock);
	if (mgr->available < mem->num_pages) {
		spin_unlock(&mgr->lock);
		return 0;
	}
	mgr->available -= mem->num_pages;
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	/* The real GTT offset is assigned later in amdgpu_gtt_mgr_alloc() */
	node->start = AMDGPU_BO_INVALID_OFFSET;
	node->size = mem->num_pages;
	mem->mm_node = node;

	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			/* No hole found, drop the node and report "no space" */
			kfree(node);
			mem->mm_node = NULL;
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->start;
	}

	return 0;
err_out:
	spin_lock(&mgr->lock);
	mgr->available += mem->num_pages;
	spin_unlock(&mgr->lock);

	return r;
}

/**
 * amdgpu_gtt_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated GTT again.
 */
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *node = mem->mm_node;

	if (!node)
		return;

	spin_lock(&mgr->lock);
	if (node->start != AMDGPU_BO_INVALID_OFFSET)
		drm_mm_remove_node(node);
	mgr->available += mem->num_pages;
	spin_unlock(&mgr->lock);

	kfree(node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @prefix: text prefix
 *
 * Dump the GTT table content via the DRM debug printer.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 const char *prefix)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_printer p = drm_debug_printer(prefix);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, &p);
	spin_unlock(&mgr->lock);
}

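/* TTM memory type manager callbacks for the GTT domain */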
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};