// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM regions
 * to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
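
/*
 * Editorial sketch, not part of the driver: the two calls above are
 * expected to pair across driver load and unload, with the TTM device
 * outliving all regions attached to it. The function names below are
 * hypothetical:
 *
 *	static int example_load(struct drm_i915_private *i915)
 *	{
 *		int err = intel_region_ttm_device_init(i915);
 *
 *		if (err)
 *			return err;
 *		// ... set up regions, attaching each via intel_region_ttm_init()
 *		return 0;
 *	}
 *
 *	static void example_unload(struct drm_i915_private *i915)
 *	{
 *		// regions are finalized first; the TTM device goes last
 *		intel_region_ttm_device_fini(i915);
 *	}
 */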

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT, should we decide to implement those.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
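
/*
 * Editorial note: with the mapping above, a SYSTEM region resolves to
 * TTM_PL_SYSTEM, while LMEM/MOCK instance N resolves to TTM_PL_PRIV + N,
 * so the matching resource manager can be looked up with:
 *
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(&mem->i915->bdev,
 *				 intel_region_to_ttm_type(mem));
 */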

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      resource_size(&mem->io),
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}
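
/*
 * Editorial sketch: a region setup path would typically pair this with
 * intel_region_ttm_fini() on the teardown side; after a successful call,
 * mem->region_private caches the buddy manager so later alloc/free calls
 * need no type lookup:
 *
 *	err = intel_region_ttm_init(mem);
 *	if (err)
 *		return err;
 *	...
 *	err = intel_region_ttm_fini(mem);	// on teardown
 */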

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		msleep(20);
		drain_workqueue(mem->i915->bdev.wq);
	}

	/*
	 * If we leaked objects, don't free the region; that would cause a
	 * use-after-free.
	 */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
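
/*
 * Editorial note: the flush loop above gives the delayed-free machinery up
 * to ten 20 ms rounds to drain; a nonzero return means objects leaked and
 * the region must be kept alive. A caller could handle that, for example:
 *
 *	if (intel_region_ttm_fini(mem))
 *		drm_err(&mem->i915->drm, "region not empty, leaking it\n");
 */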

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A refcounted sg_table on success, an error pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res,
				  u32 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}
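
/*
 * Editorial sketch: a GEM backend holding a struct ttm_resource would
 * typically build its sg-table view like this, with PAGE_SIZE chosen here
 * purely for illustration:
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 */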

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: BO offset
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.fpfn = offset >> PAGE_SHIFT;
		if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
				ret = -E2BIG;
				goto out;
			}
			place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);

out:
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL; /* Rather blow up than risk a use-after-free */
	return ret ? ERR_PTR(ret) : res;
}
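
/*
 * Editorial sketch, selftest context only: allocate a contiguous chunk at
 * no particular offset and release it again. SZ_64K is just an arbitrary
 * illustrative size:
 *
 *	struct ttm_resource *res;
 *
 *	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
 *					      SZ_64K, I915_BO_ALLOC_CONTIGUOUS);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	intel_region_ttm_resource_free(mem, res);
 */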

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	/*
	 * The manager's free callback may dereference res->bo, so point it
	 * at a stack-local mock object for the duration of the call.
	 */
	mock_bo.base.size = res->size;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}
267