// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/shrinker.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_shrinker.h"

/**
 * struct xe_shrinker - per-device shrinker
 * @xe: Back pointer to the device.
 * @lock: Lock protecting accounting.
 * @shrinkable_pages: Number of pages that are currently shrinkable.
 * @purgeable_pages: Number of pages that are currently purgeable.
 * @shrink: Pointer to the mm shrinker.
 * @pm_worker: Worker to wake up the device if required.
 */
struct xe_shrinker {
	struct xe_device *xe;
	rwlock_t lock;
	long shrinkable_pages;
	long purgeable_pages;
	struct shrinker *shrink;
	struct work_struct pm_worker;
};
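
/*
 * Locking sketch (illustrative only, not additional API): @lock guards
 * just the two page counters. Writers go through xe_shrinker_mod_pages();
 * readers sample the counters under the read lock:
 *
 *	read_lock(&shrinker->lock);
 *	num_pages = shrinker->shrinkable_pages;
 *	read_unlock(&shrinker->lock);
 */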

static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
{
	return shrink->private_data;
}

/**
 * xe_shrinker_mod_pages() - Modify shrinker page accounting
 * @shrinker: Pointer to the struct xe_shrinker.
 * @shrinkable: Shrinkable pages delta. May be negative.
 * @purgeable: Purgeable pages delta. May be negative.
 *
 * Modifies the shrinkable and purgeable pages accounting.
 */
void
xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
{
	write_lock(&shrinker->lock);
	shrinker->shrinkable_pages += shrinkable;
	shrinker->purgeable_pages += purgeable;
	write_unlock(&shrinker->lock);
}
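
/*
 * Usage sketch (hypothetical caller, not part of this file): when a
 * bo's num_pages pages become eligible for shrinking, its owner would
 * account them, and remove them again once they are shrunk or freed:
 *
 *	xe_shrinker_mod_pages(xe->mem.shrinker, num_pages, 0);
 *	...
 *	xe_shrinker_mod_pages(xe->mem.shrinker, -num_pages, 0);
 *
 * Marking pages purgeable would instead move them between the two
 * buckets: xe_shrinker_mod_pages(xe->mem.shrinker, -num_pages, num_pages).
 */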

/*
 * Walk the LRUs of the TT-backed resource managers and shrink suitable
 * bos, until @to_scan pages have been scanned or an error occurs.
 * Returns the number of pages freed, or a negative error code.
 */
static s64 xe_shrinker_walk(struct xe_device *xe,
			    struct ttm_operation_ctx *ctx,
			    const struct xe_bo_shrink_flags flags,
			    unsigned long to_scan, unsigned long *scanned)
{
	unsigned int mem_type;
	s64 freed = 0, lret;

	for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
		struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
		struct ttm_bo_lru_cursor curs;
		struct ttm_buffer_object *ttm_bo;

		/* Only TT-backed managers hold shrinkable system pages. */
		if (!man || !man->use_tt)
			continue;

		ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
			if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
				continue;

			lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
			if (lret < 0)
				return lret;

			freed += lret;
			if (*scanned >= to_scan)
				break;
		}
	}

	return freed;
}

static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	unsigned long num_pages;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);

	num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
	read_lock(&shrinker->lock);

	/* Backing up shrinkable pages is capped by available backup space. */
	if (can_backup)
		num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
	else
		num_pages = 0;

	/* Purgeable pages are freed without backup, so they always count. */
	num_pages += shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

/*
 * Check if we need runtime pm, and if so try to grab a reference if
 * already active. If grabbing a reference fails, queue a worker that
 * does it for us outside of reclaim, but don't wait for it to complete.
 * If bo shrinking needs an rpm reference and we don't have it (yet),
 * that bo will be skipped anyway.
 */
static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
				       unsigned long nr_to_scan, bool can_backup)
{
	struct xe_device *xe = shrinker->xe;

	/* Only igfx with flat CCS needs the GPU, and thus rpm, to back up bos. */
	if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
	    !ttm_backup_bytes_avail())
		return false;

	if (!force) {
		/* Purging alone doesn't need the device; only backup does. */
		read_lock(&shrinker->lock);
		force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
		read_unlock(&shrinker->lock);
		if (!force)
			return false;
	}

	if (!xe_pm_runtime_get_if_active(xe)) {
		if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
			xe_pm_runtime_get(xe);
			return true;
		}
		queue_work(xe->unordered_wq, &shrinker->pm_worker);
		return false;
	}

	return true;
}

static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
{
	if (runtime_pm)
		xe_pm_runtime_put(shrinker->xe);
}

static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = ttm_bo_shrink_avoid_wait(),
	};
	unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
	struct xe_bo_shrink_flags shrink_flags = {
		.purge = true,
		/* Don't request writeback without __GFP_IO. */
		.writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
	};
	bool runtime_pm;
	bool purgeable;
	bool can_backup = !!(sc->gfp_mask & __GFP_FS);
	s64 lret;

	nr_to_scan = sc->nr_to_scan;

	read_lock(&shrinker->lock);
	purgeable = !!shrinker->purgeable_pages;
	read_unlock(&shrinker->lock);

	/* Might need runtime PM. Try to wake early if it looks like it. */
	runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);

	/* First pass: purge, which frees pages without backing them up. */
	if (purgeable && nr_scanned < nr_to_scan) {
		lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
					nr_to_scan, &nr_scanned);
		if (lret >= 0)
			freed += lret;
	}

	sc->nr_scanned = nr_scanned;
	if (nr_scanned >= nr_to_scan || !can_backup)
		goto out;

	/* If we didn't wake before, try to do it now if needed. */
	if (!runtime_pm)
		runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);

	/* Second pass: back up shrinkable bos. */
	shrink_flags.purge = false;
	lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
				nr_to_scan, &nr_scanned);
	if (lret >= 0)
		freed += lret;

	sc->nr_scanned = nr_scanned;
out:
	xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
	return nr_scanned ? freed : SHRINK_STOP;
}
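
/*
 * Illustration only, not reclaim-path code: the core mm invokes the two
 * callbacks above roughly as
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = GFP_KERNEL,
 *		.nr_to_scan = SHRINK_BATCH,
 *	};
 *	unsigned long count = xe_shrinker_count(shrink, &sc);
 *
 *	if (count && count != SHRINK_EMPTY)
 *		(void)xe_shrinker_scan(shrink, &sc);
 *
 * with the count return value deciding how many pages the scan callback
 * is asked to process.
 */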

/*
 * Wake up the device for shrinking. The worker runs outside of reclaim,
 * so it can resume synchronously; a subsequent scan may then grab the
 * reference with xe_pm_runtime_get_if_active().
 */
static void xe_shrinker_pm(struct work_struct *work)
{
	struct xe_shrinker *shrinker =
		container_of(work, typeof(*shrinker), pm_worker);

	xe_pm_runtime_get(shrinker->xe);
	xe_pm_runtime_put(shrinker->xe);
}

static void xe_shrinker_fini(struct drm_device *drm, void *arg)
{
	struct xe_shrinker *shrinker = arg;

	xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
	xe_assert(shrinker->xe, !shrinker->purgeable_pages);
	shrinker_free(shrinker->shrink);
	flush_work(&shrinker->pm_worker);
	kfree(shrinker);
}

/**
 * xe_shrinker_create() - Create an xe per-device shrinker
 * @xe: Pointer to the xe device.
 *
 * Return: %0 on success. Negative error code on failure.
 */
int xe_shrinker_create(struct xe_device *xe)
{
	struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);

	if (!shrinker)
		return -ENOMEM;

	shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
	if (!shrinker->shrink) {
		kfree(shrinker);
		return -ENOMEM;
	}

	INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
	shrinker->xe = xe;
	rwlock_init(&shrinker->lock);
	shrinker->shrink->count_objects = xe_shrinker_count;
	shrinker->shrink->scan_objects = xe_shrinker_scan;
	shrinker->shrink->private_data = shrinker;
	shrinker_register(shrinker->shrink);
	xe->mem.shrinker = shrinker;

	return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
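
/*
 * Usage sketch (the function name below is illustrative): the shrinker
 * is created once per device after the DRM device is initialized, and
 * is torn down automatically through the drmm action registered above:
 *
 *	int example_xe_probe_step(struct xe_device *xe)
 *	{
 *		int err = xe_shrinker_create(xe);
 *
 *		if (err)
 *			return err;
 *		...
 *	}
 */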
258