xref: /linux/drivers/gpu/drm/drm_pagemap_util.c (revision 77f14f2f2d73f6955b856e7c91ca9fb7046da191)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <drm/drm_drv.h>
7 #include <drm/drm_managed.h>
8 #include <drm/drm_pagemap.h>
9 #include <drm/drm_pagemap_util.h>
10 #include <drm/drm_print.h>
11 
/**
 * struct drm_pagemap_cache - Lookup structure for pagemaps
 *
 * Structure to keep track of active (refcount > 0) and inactive
 * (refcount == 0) pagemaps. Inactive pagemaps can be made active
 * again by waiting for the @queued completion (indicating that the
 * pagemap has been put on the @shrinker's list of shrinkable
 * pagemaps, and then successfully removing it from @shrinker's
 * list. The latter may fail if the shrinker is already in the
 * process of freeing the pagemap. A struct drm_pagemap_cache can
 * hold a single struct drm_pagemap.
 */
struct drm_pagemap_cache {
	/** @lookup_mutex: Mutex making the lookup process atomic */
	struct mutex lookup_mutex;
	/** @lock: Lock protecting the @dpagemap pointer */
	spinlock_t lock;
	/** @shrinker: Pointer to the shrinker used for this cache. Immutable. */
	struct drm_pagemap_shrinker *shrinker;
	/** @dpagemap: Non-refcounted pointer to the drm_pagemap */
	struct drm_pagemap *dpagemap;
	/**
	 * @queued: Signals when an inactive drm_pagemap has been put on
	 * @shrinker's list.
	 */
	struct completion queued;
};
39 
/**
 * struct drm_pagemap_shrinker - Shrinker to remove unused pagemaps
 *
 * Holds the list of inactive drm_pagemaps queued by
 * drm_pagemap_shrinker_add() and the core &struct shrinker that
 * frees them under memory pressure.
 */
struct drm_pagemap_shrinker {
	/** @drm: Pointer to the drm device. */
	struct drm_device *drm;
	/** @lock: Spinlock to protect the @dpagemaps list. */
	spinlock_t lock;
	/** @dpagemaps: List of unused dpagemaps. */
	struct list_head dpagemaps;
	/** @num_dpagemaps: Number of unused dpagemaps in @dpagemaps. */
	atomic_t num_dpagemaps;
	/** @shrink: Pointer to the struct shrinker. */
	struct shrinker *shrink;
};
55 
/* Forward declaration; defined below, needed by the cache teardown path. */
static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
57 
58 static void drm_pagemap_cache_fini(void *arg)
59 {
60 	struct drm_pagemap_cache *cache = arg;
61 	struct drm_pagemap *dpagemap;
62 
63 	drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
64 	spin_lock(&cache->lock);
65 	dpagemap = cache->dpagemap;
66 	if (!dpagemap) {
67 		spin_unlock(&cache->lock);
68 		goto out;
69 	}
70 
71 	if (drm_pagemap_shrinker_cancel(dpagemap)) {
72 		cache->dpagemap = NULL;
73 		spin_unlock(&cache->lock);
74 		drm_pagemap_destroy(dpagemap, false);
75 	}
76 
77 out:
78 	mutex_destroy(&cache->lookup_mutex);
79 	kfree(cache);
80 }
81 
82 /**
83  * drm_pagemap_cache_create_devm() - Create a drm_pagemap_cache
84  * @shrinker: Pointer to a struct drm_pagemap_shrinker.
85  *
86  * Create a device-managed drm_pagemap cache. The cache is automatically
87  * destroyed on struct device removal, at which point any *inactive*
88  * drm_pagemap's are destroyed.
89  *
90  * Return: Pointer to a struct drm_pagemap_cache on success. Error pointer
91  * on failure.
92  */
93 struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
94 {
95 	struct drm_pagemap_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
96 	int err;
97 
98 	if (!cache)
99 		return ERR_PTR(-ENOMEM);
100 
101 	mutex_init(&cache->lookup_mutex);
102 	spin_lock_init(&cache->lock);
103 	cache->shrinker = shrinker;
104 	init_completion(&cache->queued);
105 	err = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
106 	if (err)
107 		return ERR_PTR(err);
108 
109 	return cache;
110 }
111 EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
112 
113 /**
114  * DOC: Cache lookup
115  *
116  * Cache lookup should be done under a locked mutex, so that a
 * failed drm_pagemap_get_from_cache() and a following
 * drm_pagemap_cache_set_pagemap() are carried out as an atomic
 * operation WRT other lookups. Otherwise, racing lookups may
 * unnecessarily concurrently create pagemaps to fulfill a
 * failed lookup. The API provides two functions to perform this lock,
 * drm_pagemap_cache_lock_lookup() and drm_pagemap_cache_unlock_lookup(),
 * and they should be used in the following way:
124  *
125  * .. code-block:: c
126  *
 *		drm_pagemap_cache_lock_lookup(cache);
128  *		dpagemap = drm_pagemap_get_from_cache(cache);
129  *		if (dpagemap)
130  *			goto out_unlock;
131  *
132  *		dpagemap = driver_create_new_dpagemap();
133  *		if (!IS_ERR(dpagemap))
134  *			drm_pagemap_cache_set_pagemap(cache, dpagemap);
135  *
136  *     out_unlock:
 *		drm_pagemap_cache_unlock_lookup(cache);
138  */
139 
140 /**
141  * drm_pagemap_cache_lock_lookup() - Lock a drm_pagemap_cache for lookup.
142  * @cache: The drm_pagemap_cache to lock.
143  *
144  * Return: %-EINTR if interrupted while blocking. %0 otherwise.
145  */
146 int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
147 {
148 	return mutex_lock_interruptible(&cache->lookup_mutex);
149 }
150 EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
151 
/**
 * drm_pagemap_cache_unlock_lookup() - Unlock a drm_pagemap_cache after lookup.
 * @cache: The drm_pagemap_cache to unlock.
 *
 * Releases the lookup mutex taken by drm_pagemap_cache_lock_lookup().
 */
void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
{
	mutex_unlock(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
161 
/**
 * drm_pagemap_get_from_cache() - Lookup of drm_pagemaps.
 * @cache: The cache used for lookup.
 *
 * If an active pagemap is present in the cache, it is immediately returned.
 * If an inactive pagemap is present, it's removed from the shrinker list and
 * an attempt is made to make it active.
 * If no pagemap present or the attempt to make it active failed, %NULL is returned
 * to indicate to the caller to create a new drm_pagemap and insert it into
 * the cache.
 *
 * Return: A reference-counted pointer to a drm_pagemap if successful. An error
 * pointer if an error occurred, or %NULL if no drm_pagemap was found and
 * the caller should insert a new one.
 */
struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
{
	struct drm_pagemap *dpagemap;
	int err;

	lockdep_assert_held(&cache->lookup_mutex);
retry:
	spin_lock(&cache->lock);
	dpagemap = cache->dpagemap;
	/* Fast path: the cached pagemap is still active (refcount > 0). */
	if (drm_pagemap_get_unless_zero(dpagemap)) {
		spin_unlock(&cache->lock);
		return dpagemap;
	}

	/* Cache empty: tell the caller to create and insert a new pagemap. */
	if (!dpagemap) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/*
	 * The pagemap went inactive but hasn't been queued on the
	 * shrinker list yet (see drm_pagemap_shrinker_add(), which
	 * signals @queued after queuing). Drop the lock, wait for it
	 * to be queued, and re-examine the cache from scratch.
	 */
	if (!try_wait_for_completion(&cache->queued)) {
		spin_unlock(&cache->lock);
		err = wait_for_completion_interruptible(&cache->queued);
		if (err)
			return ERR_PTR(err);
		goto retry;
	}

	if (drm_pagemap_shrinker_cancel(dpagemap)) {
		/*
		 * Won the race against the shrinker: the pagemap was
		 * removed from the shrink list before being freed.
		 * Reinitialize it and put it back in the cache.
		 */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		err = drm_pagemap_reinit(dpagemap);
		if (err) {
			drm_pagemap_destroy(dpagemap, false);
			return ERR_PTR(err);
		}
		drm_pagemap_cache_set_pagemap(cache, dpagemap);
	} else {
		/*
		 * Lost the race: the shrinker is already freeing the
		 * pagemap. Clear the cache pointer and report a miss so
		 * the caller creates a fresh pagemap.
		 */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		dpagemap = NULL;
	}

	return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache);
222 
223 /**
224  * drm_pagemap_cache_set_pagemap() - Assign a drm_pagemap to a drm_pagemap_cache
225  * @cache: The cache to assign the drm_pagemap to.
226  * @dpagemap: The drm_pagemap to assign.
227  *
228  * The function must be called to populate a drm_pagemap_cache only
229  * after a call to drm_pagemap_get_from_cache() returns NULL.
230  */
231 void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
232 {
233 	struct drm_device *drm = dpagemap->drm;
234 
235 	lockdep_assert_held(&cache->lookup_mutex);
236 	spin_lock(&cache->lock);
237 	dpagemap->cache = cache;
238 	swap(cache->dpagemap, dpagemap);
239 	reinit_completion(&cache->queued);
240 	spin_unlock(&cache->lock);
241 	drm_WARN_ON(drm, !!dpagemap);
242 }
243 EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
244 
245 /**
246  * drm_pagemap_get_from_cache_if_active() - Quick lookup of active drm_pagemaps
247  * @cache: The cache to lookup from.
248  *
249  * Function that should be used to lookup a drm_pagemap that is already active.
250  * (refcount > 0).
251  *
252  * Return: A pointer to the cache's drm_pagemap if it's active; %NULL otherwise.
253  */
254 struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
255 {
256 	struct drm_pagemap *dpagemap;
257 
258 	spin_lock(&cache->lock);
259 	dpagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
260 	spin_unlock(&cache->lock);
261 
262 	return dpagemap;
263 }
264 EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
265 
266 static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
267 {
268 	struct drm_pagemap_cache *cache = dpagemap->cache;
269 	struct drm_pagemap_shrinker *shrinker = cache->shrinker;
270 
271 	spin_lock(&shrinker->lock);
272 	if (list_empty(&dpagemap->shrink_link)) {
273 		spin_unlock(&shrinker->lock);
274 		return false;
275 	}
276 
277 	list_del_init(&dpagemap->shrink_link);
278 	atomic_dec(&shrinker->num_dpagemaps);
279 	spin_unlock(&shrinker->lock);
280 	return true;
281 }
282 
#ifdef CONFIG_PROVE_LOCKING
/**
 * drm_pagemap_shrinker_might_lock() - lockdep test for drm_pagemap_shrinker_add()
 * @dpagemap: The drm pagemap.
 *
 * The drm_pagemap_shrinker_add() function performs some locking.
 * This function can be called in code-paths that might
 * call drm_pagemap_shrinker_add() to detect any lockdep problems early.
 */
void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
{
	struct drm_pagemap_cache *cache;
	int idx;

	if (!drm_dev_enter(dpagemap->drm, &idx))
		return;

	cache = dpagemap->cache;
	if (cache)
		might_lock(&cache->shrinker->lock);

	drm_dev_exit(idx);
}
#endif
306 
307 /**
308  * drm_pagemap_shrinker_add() - Add a drm_pagemap to the shrinker list or destroy
309  * @dpagemap: The drm_pagemap.
310  *
311  * If @dpagemap is associated with a &struct drm_pagemap_cache AND the
312  * struct device backing the drm device is still alive, add @dpagemap to
313  * the &struct drm_pagemap_shrinker list of shrinkable drm_pagemaps.
314  *
315  * Otherwise destroy the pagemap directly using drm_pagemap_destroy().
316  *
317  * This is an internal function which is not intended to be exposed to drivers.
318  */
319 void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
320 {
321 	struct drm_pagemap_cache *cache;
322 	struct drm_pagemap_shrinker *shrinker;
323 	int idx;
324 
325 	/*
326 	 * The pagemap cache and shrinker are disabled at
327 	 * pci device remove time. After that, dpagemaps
328 	 * are freed directly.
329 	 */
330 	if (!drm_dev_enter(dpagemap->drm, &idx))
331 		goto out_no_cache;
332 
333 	cache = dpagemap->cache;
334 	if (!cache) {
335 		drm_dev_exit(idx);
336 		goto out_no_cache;
337 	}
338 
339 	shrinker = cache->shrinker;
340 	spin_lock(&shrinker->lock);
341 	list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
342 	atomic_inc(&shrinker->num_dpagemaps);
343 	spin_unlock(&shrinker->lock);
344 	complete_all(&cache->queued);
345 	drm_dev_exit(idx);
346 	return;
347 
348 out_no_cache:
349 	drm_pagemap_destroy(dpagemap, true);
350 }
351 
352 static unsigned long
353 drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
354 {
355 	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
356 	unsigned long count = atomic_read(&shrinker->num_dpagemaps);
357 
358 	return count ? : SHRINK_EMPTY;
359 }
360 
/*
 * Shrinker callback: pop inactive pagemaps off the shrink list and
 * destroy them until the list is empty or sc->nr_to_scan is reached.
 */
static unsigned long
drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
	struct drm_pagemap *dpagemap;
	struct drm_pagemap_cache *cache;
	unsigned long nr_freed = 0;

	sc->nr_scanned = 0;
	spin_lock(&shrinker->lock);
	do {
		dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
						    shrink_link);
		if (!dpagemap)
			break;

		/*
		 * Claim the pagemap while the list lock is held;
		 * drm_pagemap_shrinker_cancel() will then see an empty
		 * shrink_link and back off.
		 */
		atomic_dec(&shrinker->num_dpagemaps);
		list_del_init(&dpagemap->shrink_link);
		/* NOTE(review): lock dropped across destroy — presumably it may sleep. */
		spin_unlock(&shrinker->lock);

		sc->nr_scanned++;
		nr_freed++;

		/* Detach the pagemap from its cache before freeing it. */
		cache = dpagemap->cache;
		spin_lock(&cache->lock);
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);

		drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
		drm_pagemap_destroy(dpagemap, true);
		spin_lock(&shrinker->lock);
	} while (sc->nr_scanned < sc->nr_to_scan);
	spin_unlock(&shrinker->lock);

	return sc->nr_scanned ? nr_freed : SHRINK_STOP;
}
397 
398 static void drm_pagemap_shrinker_fini(void *arg)
399 {
400 	struct drm_pagemap_shrinker *shrinker = arg;
401 
402 	drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
403 	drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
404 	shrinker_free(shrinker->shrink);
405 	kfree(shrinker);
406 }
407 
/**
 * drm_pagemap_shrinker_create_devm() - Create and register a pagemap shrinker
 * @drm: The drm device
 *
 * Create and register a pagemap shrinker that shrinks unused pagemaps
 * and thereby reduces memory footprint.
 * The shrinker is drm_device managed and unregisters itself when
 * the drm device is removed.
 *
 * Return: Pointer to a struct drm_pagemap_shrinker on success. Error
 * pointer on failure.
 */
struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
{
	struct drm_pagemap_shrinker *shrinker;
	struct shrinker *shrink;
	int err;

	shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
	if (!shrinker)
		return ERR_PTR(-ENOMEM);

	shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
	if (!shrink) {
		kfree(shrinker);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&shrinker->lock);
	INIT_LIST_HEAD(&shrinker->dpagemaps);
	shrinker->drm = drm;
	shrinker->shrink = shrink;
	shrink->count_objects = drm_pagemap_shrinker_count;
	shrink->scan_objects = drm_pagemap_shrinker_scan;
	shrink->private_data = shrinker;
	shrinker_register(shrink);

	/* On failure the devm action runs immediately and frees everything. */
	err = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
	if (err)
		return ERR_PTR(err);

	return shrinker;
}
EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
451