xref: /linux/drivers/gpu/drm/xe/xe_shrinker.c (revision dfd4b508c8c6106083698a0dd5e35aecc7c48725)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/shrinker.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_shrinker.h"

/**
 * struct xe_shrinker - per-device shrinker
 * @xe: Back pointer to the device.
 * @lock: Lock protecting accounting.
 * @shrinkable_pages: Number of pages that are currently shrinkable.
 * @purgeable_pages: Number of pages that are currently purgeable.
 * @shrink: Pointer to the mm shrinker.
 * @pm_worker: Worker to wake up the device if required.
 */
struct xe_shrinker {
	struct xe_device *xe;
	rwlock_t lock;
	long shrinkable_pages;
	long purgeable_pages;
	struct shrinker *shrink;
	struct work_struct pm_worker;
};

static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
{
	return shrink->private_data;
}

/**
 * xe_shrinker_mod_pages() - Modify shrinker page accounting
 * @shrinker: Pointer to the struct xe_shrinker.
 * @shrinkable: Shrinkable pages delta. May be negative.
 * @purgeable: Purgeable pages delta. May be negative.
 *
 * Modifies the shrinkable and purgeable pages accounting.
 */
void
xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
{
	write_lock(&shrinker->lock);
	shrinker->shrinkable_pages += shrinkable;
	shrinker->purgeable_pages += purgeable;
	write_unlock(&shrinker->lock);
}

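/*
 * Illustrative sketch only (the real call sites live in the bo code):
 * a path that makes num_pages of a bo shrinkable would account them as
 *
 *	xe_shrinker_mod_pages(xe->mem.shrinker, num_pages, 0);
 *
 * and would move the same count over to the purgeable bucket once the
 * bo is marked purgeable:
 *
 *	xe_shrinker_mod_pages(xe->mem.shrinker, -num_pages, num_pages);
 */
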
static s64 __xe_shrinker_walk(struct xe_device *xe,
			      struct ttm_operation_ctx *ctx,
			      const struct xe_bo_shrink_flags flags,
			      unsigned long to_scan, unsigned long *scanned)
{
	unsigned int mem_type;
	s64 freed = 0, lret;

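	/* Walk only the system and TT managers; other placements have no TT-backed pages to shrink. */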
	for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
		struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
		struct ttm_bo_lru_cursor curs;
		struct ttm_buffer_object *ttm_bo;
		struct ttm_lru_walk_arg arg = {
			.ctx = ctx,
			.trylock_only = true,
		};

		if (!man || !man->use_tt)
			continue;

		ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, ttm_bo) {
			if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
				continue;

			lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
			if (lret < 0)
				return lret;

			freed += lret;
			if (*scanned >= to_scan)
				break;
		}
		/* Trylocks should never error, just fail. */
		xe_assert(xe, !IS_ERR(ttm_bo));
	}

	return freed;
}

/*
 * Try shrinking idle objects without writeback first. If that is not
 * sufficient, also try non-idle objects, and finally, if that is still
 * not sufficient, add writeback. This avoids stalls and explicit
 * writebacks under light or moderate memory pressure.
 */
static s64 xe_shrinker_walk(struct xe_device *xe,
			    struct ttm_operation_ctx *ctx,
			    const struct xe_bo_shrink_flags flags,
			    unsigned long to_scan, unsigned long *scanned)
{
	bool no_wait_gpu = true;
	struct xe_bo_shrink_flags save_flags = flags;
	s64 lret, freed;

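	/* First pass: idle objects only (no_wait_gpu) and no writeback. */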
	swap(no_wait_gpu, ctx->no_wait_gpu);
	save_flags.writeback = false;
	lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
	swap(no_wait_gpu, ctx->no_wait_gpu);
	if (lret < 0 || *scanned >= to_scan)
		return lret;

	freed = lret;
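	/* Second pass: include non-idle objects if the caller allows GPU waits, still without writeback. */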
	if (!ctx->no_wait_gpu) {
		lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
		if (lret < 0)
			return lret;
		freed += lret;
		if (*scanned >= to_scan)
			return freed;
	}

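	/* Final pass: add writeback, if requested by the caller. */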
	if (flags.writeback) {
		lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
		if (lret < 0)
			return lret;
		freed += lret;
	}

	return freed;
}

static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	unsigned long num_pages;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);

	num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
	read_lock(&shrinker->lock);

	if (can_backup)
		num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
	else
		num_pages = 0;

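	/* Purging frees pages without backing them up, so purgeable pages count even without __GFP_FS. */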
	num_pages += shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

/*
 * Check if we need runtime pm, and if so try to grab a reference if
 * already active. If grabbing a reference fails, queue a worker that
 * does it for us outside of reclaim, but don't wait for it to complete.
 * If bo shrinking needs an rpm reference and we don't have it (yet),
 * that bo will be skipped anyway.
 */
static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
				       unsigned long nr_to_scan, bool can_backup)
{
	struct xe_device *xe = shrinker->xe;

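	/*
	 * A wakeup is only ever needed on integrated parts with flat CCS,
	 * where backing a bo up may require the GPU to save CCS state, and
	 * only when there is backup space to shrink into.
	 */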
	if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
	    !ttm_backup_bytes_avail())
		return false;

	if (!force) {
		read_lock(&shrinker->lock);
		force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
		read_unlock(&shrinker->lock);
		if (!force)
			return false;
	}

	if (!xe_pm_runtime_get_if_active(xe)) {
		if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
			xe_pm_runtime_get(xe);
			return true;
		}
		queue_work(xe->unordered_wq, &shrinker->pm_worker);
		return false;
	}

	return true;
}

static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
{
	if (runtime_pm)
		xe_pm_runtime_put(shrinker->xe);
}

static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
	struct xe_bo_shrink_flags shrink_flags = {
		.purge = true,
		/* Don't request writeback without __GFP_IO. */
		.writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
	};
	bool runtime_pm;
	bool purgeable;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);
	s64 lret;

	nr_to_scan = sc->nr_to_scan;

	read_lock(&shrinker->lock);
	purgeable = !!shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	/* Might need runtime PM. Try to wake early if it looks like it. */
	runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);

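	/* Purge pass first: purged pages are dropped and need no backup space. */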
	if (purgeable && nr_scanned < nr_to_scan) {
		lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
					nr_to_scan, &nr_scanned);
		if (lret >= 0)
			freed += lret;
	}

	sc->nr_scanned = nr_scanned;
	if (nr_scanned >= nr_to_scan || !can_backup)
		goto out;

	/* If we didn't wake before, try to do it now if needed. */
	if (!runtime_pm)
		runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);

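	/* Shrink pass: back the remaining shrinkable pages up to the backup backend. */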
	shrink_flags.purge = false;

	lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
				nr_to_scan, &nr_scanned);
	if (lret >= 0)
		freed += lret;

	sc->nr_scanned = nr_scanned;
out:
	xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
	return nr_scanned ? freed : SHRINK_STOP;
}

/* Wake up the device for shrinking. */
static void xe_shrinker_pm(struct work_struct *work)
{
	struct xe_shrinker *shrinker =
		container_of(work, typeof(*shrinker), pm_worker);

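	/*
	 * Grabbing and dropping a reference resumes the device, so that
	 * later shrinker invocations can succeed with
	 * xe_pm_runtime_get_if_active().
	 */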
	xe_pm_runtime_get(shrinker->xe);
	xe_pm_runtime_put(shrinker->xe);
}

static void xe_shrinker_fini(struct drm_device *drm, void *arg)
{
	struct xe_shrinker *shrinker = arg;

	xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
	xe_assert(shrinker->xe, !shrinker->purgeable_pages);
	shrinker_free(shrinker->shrink);
	flush_work(&shrinker->pm_worker);
	kfree(shrinker);
}

/**
 * xe_shrinker_create() - Create an xe per-device shrinker
 * @xe: Pointer to the xe device.
 *
 * Return: %0 on success. Negative error code on failure.
 */
int xe_shrinker_create(struct xe_device *xe)
{
	struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);

	if (!shrinker)
		return -ENOMEM;

	shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
	if (!shrinker->shrink) {
		kfree(shrinker);
		return -ENOMEM;
	}

	INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
	shrinker->xe = xe;
	rwlock_init(&shrinker->lock);
	shrinker->shrink->count_objects = xe_shrinker_count;
	shrinker->shrink->scan_objects = xe_shrinker_scan;
	shrinker->shrink->private_data = shrinker;
	shrinker_register(shrinker->shrink);
	xe->mem.shrinker = shrinker;

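	/* Tear the shrinker down automatically when the drm device is released. */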
	return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
307