xref: /linux/drivers/gpu/drm/drm_pagemap_util.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <linux/slab.h>
7 
8 #include <drm/drm_drv.h>
9 #include <drm/drm_managed.h>
10 #include <drm/drm_pagemap.h>
11 #include <drm/drm_pagemap_util.h>
12 #include <drm/drm_print.h>
13 
/**
 * struct drm_pagemap_cache - Lookup structure for pagemaps
 *
 * Structure to keep track of active (refcount > 0) and inactive
 * (refcount == 0) pagemaps. Inactive pagemaps can be made active
 * again by waiting for the @queued completion (indicating that the
 * pagemap has been put on the @shrinker's list of shrinkable
 * pagemaps), and then successfully removing it from @shrinker's
 * list. The latter may fail if the shrinker is already in the
 * process of freeing the pagemap. A struct drm_pagemap_cache can
 * hold a single struct drm_pagemap.
 */
struct drm_pagemap_cache {
	/** @lookup_mutex: Mutex making the lookup process atomic */
	struct mutex lookup_mutex;
	/** @lock: Spinlock protecting the @dpagemap pointer */
	spinlock_t lock;
	/** @shrinker: Pointer to the shrinker used for this cache. Immutable. */
	struct drm_pagemap_shrinker *shrinker;
	/** @dpagemap: Non-refcounted pointer to the drm_pagemap */
	struct drm_pagemap *dpagemap;
	/**
	 * @queued: Signals when an inactive drm_pagemap has been put on
	 * @shrinker's list.
	 */
	struct completion queued;
};
41 
/**
 * struct drm_pagemap_shrinker - Shrinker to remove unused pagemaps
 */
struct drm_pagemap_shrinker {
	/** @drm: Pointer to the drm device. */
	struct drm_device *drm;
	/** @lock: Spinlock to protect the @dpagemaps list. */
	spinlock_t lock;
	/**
	 * @dpagemaps: List of unused dpagemaps, linked through
	 * drm_pagemap::shrink_link.
	 */
	struct list_head dpagemaps;
	/** @num_dpagemaps: Number of unused dpagemaps in @dpagemaps. */
	atomic_t num_dpagemaps;
	/** @shrink: Pointer to the struct shrinker. */
	struct shrinker *shrink;
};
57 
58 static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
59 
drm_pagemap_cache_fini(void * arg)60 static void drm_pagemap_cache_fini(void *arg)
61 {
62 	struct drm_pagemap_cache *cache = arg;
63 	struct drm_pagemap *dpagemap;
64 
65 	drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
66 	spin_lock(&cache->lock);
67 	dpagemap = cache->dpagemap;
68 	if (!dpagemap) {
69 		spin_unlock(&cache->lock);
70 		goto out;
71 	}
72 
73 	if (drm_pagemap_shrinker_cancel(dpagemap)) {
74 		cache->dpagemap = NULL;
75 		spin_unlock(&cache->lock);
76 		drm_pagemap_destroy(dpagemap, false);
77 	}
78 
79 out:
80 	mutex_destroy(&cache->lookup_mutex);
81 	kfree(cache);
82 }
83 
84 /**
85  * drm_pagemap_cache_create_devm() - Create a drm_pagemap_cache
86  * @shrinker: Pointer to a struct drm_pagemap_shrinker.
87  *
88  * Create a device-managed drm_pagemap cache. The cache is automatically
89  * destroyed on struct device removal, at which point any *inactive*
90  * drm_pagemap's are destroyed.
91  *
92  * Return: Pointer to a struct drm_pagemap_cache on success. Error pointer
93  * on failure.
94  */
drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker * shrinker)95 struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
96 {
97 	struct drm_pagemap_cache *cache = kzalloc_obj(*cache);
98 	int err;
99 
100 	if (!cache)
101 		return ERR_PTR(-ENOMEM);
102 
103 	mutex_init(&cache->lookup_mutex);
104 	spin_lock_init(&cache->lock);
105 	cache->shrinker = shrinker;
106 	init_completion(&cache->queued);
107 	err = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
108 	if (err)
109 		return ERR_PTR(err);
110 
111 	return cache;
112 }
113 EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
114 
/**
 * DOC: Cache lookup
 *
 * Cache lookup should be done under a locked mutex, so that a
 * failed drm_pagemap_get_from_cache() and a following
 * drm_pagemap_cache_set_pagemap() are carried out as an atomic
 * operation WRT other lookups. Otherwise, racing lookups may
 * unnecessarily concurrently create pagemaps to fulfill a
 * failed lookup. The API provides two functions to perform this locking,
 * drm_pagemap_cache_lock_lookup() and drm_pagemap_cache_unlock_lookup(),
 * and they should be used in the following way:
 *
 * .. code-block:: c
 *
 *		drm_pagemap_cache_lock_lookup(cache);
 *		dpagemap = drm_pagemap_get_from_cache(cache);
 *		if (dpagemap)
 *			goto out_unlock;
 *
 *		dpagemap = driver_create_new_dpagemap();
 *		if (!IS_ERR(dpagemap))
 *			drm_pagemap_cache_set_pagemap(cache, dpagemap);
 *
 *     out_unlock:
 *		drm_pagemap_cache_unlock_lookup(cache);
 */
141 
142 /**
143  * drm_pagemap_cache_lock_lookup() - Lock a drm_pagemap_cache for lookup.
144  * @cache: The drm_pagemap_cache to lock.
145  *
146  * Return: %-EINTR if interrupted while blocking. %0 otherwise.
147  */
drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache * cache)148 int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
149 {
150 	return mutex_lock_interruptible(&cache->lookup_mutex);
151 }
152 EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
153 
154 /**
155  * drm_pagemap_cache_unlock_lookup() - Unlock a drm_pagemap_cache after lookup.
156  * @cache: The drm_pagemap_cache to unlock.
157  */
drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache * cache)158 void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
159 {
160 	mutex_unlock(&cache->lookup_mutex);
161 }
162 EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
163 
/**
 * drm_pagemap_get_from_cache() - Lookup of drm_pagemaps.
 * @cache: The cache used for lookup.
 *
 * If an active pagemap is present in the cache, it is immediately returned.
 * If an inactive pagemap is present, it's removed from the shrinker list and
 * an attempt is made to make it active.
 * If no pagemap present or the attempt to make it active failed, %NULL is returned
 * to indicate to the caller to create a new drm_pagemap and insert it into
 * the cache.
 *
 * Must be called with the cache's lookup_mutex held.
 *
 * Return: A reference-counted pointer to a drm_pagemap if successful. An error
 * pointer if an error occurred, or %NULL if no drm_pagemap was found and
 * the caller should insert a new one.
 */
struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
{
	struct drm_pagemap *dpagemap;
	int err;

	lockdep_assert_held(&cache->lookup_mutex);
retry:
	spin_lock(&cache->lock);
	dpagemap = cache->dpagemap;
	/* Fast path: the cached pagemap is still active (refcount > 0). */
	if (drm_pagemap_get_unless_zero(dpagemap)) {
		spin_unlock(&cache->lock);
		return dpagemap;
	}

	/* Cache is empty: tell the caller to create and insert a new one. */
	if (!dpagemap) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/*
	 * The pagemap is inactive but may not yet have reached the
	 * shrinker's list (see drm_pagemap_shrinker_add(), which signals
	 * @queued after listing it). Wait for that to happen outside the
	 * spinlock, then retry from the top since the cache may have
	 * changed meanwhile.
	 */
	if (!try_wait_for_completion(&cache->queued)) {
		spin_unlock(&cache->lock);
		err = wait_for_completion_interruptible(&cache->queued);
		if (err)
			return ERR_PTR(err);
		goto retry;
	}

	if (drm_pagemap_shrinker_cancel(dpagemap)) {
		/* Successfully pulled off the shrinker list; revive it. */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		/*
		 * NOTE(review): drm_pagemap_reinit() presumably re-arms the
		 * refcount so the returned pointer is refcounted — confirm
		 * against drm_pagemap.c.
		 */
		err = drm_pagemap_reinit(dpagemap);
		if (err) {
			drm_pagemap_destroy(dpagemap, false);
			return ERR_PTR(err);
		}
		drm_pagemap_cache_set_pagemap(cache, dpagemap);
	} else {
		/*
		 * The shrinker beat us to it and is freeing the pagemap.
		 * Drop our cached pointer and report an empty cache.
		 */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		dpagemap = NULL;
	}

	return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache);
224 
225 /**
226  * drm_pagemap_cache_set_pagemap() - Assign a drm_pagemap to a drm_pagemap_cache
227  * @cache: The cache to assign the drm_pagemap to.
228  * @dpagemap: The drm_pagemap to assign.
229  *
230  * The function must be called to populate a drm_pagemap_cache only
231  * after a call to drm_pagemap_get_from_cache() returns NULL.
232  */
drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache * cache,struct drm_pagemap * dpagemap)233 void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
234 {
235 	struct drm_device *drm = dpagemap->drm;
236 
237 	lockdep_assert_held(&cache->lookup_mutex);
238 	spin_lock(&cache->lock);
239 	dpagemap->cache = cache;
240 	swap(cache->dpagemap, dpagemap);
241 	reinit_completion(&cache->queued);
242 	spin_unlock(&cache->lock);
243 	drm_WARN_ON(drm, !!dpagemap);
244 }
245 EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
246 
247 /**
248  * drm_pagemap_get_from_cache_if_active() - Quick lookup of active drm_pagemaps
249  * @cache: The cache to lookup from.
250  *
251  * Function that should be used to lookup a drm_pagemap that is already active.
252  * (refcount > 0).
253  *
254  * Return: A pointer to the cache's drm_pagemap if it's active; %NULL otherwise.
255  */
drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache * cache)256 struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
257 {
258 	struct drm_pagemap *dpagemap;
259 
260 	spin_lock(&cache->lock);
261 	dpagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
262 	spin_unlock(&cache->lock);
263 
264 	return dpagemap;
265 }
266 EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
267 
drm_pagemap_shrinker_cancel(struct drm_pagemap * dpagemap)268 static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
269 {
270 	struct drm_pagemap_cache *cache = dpagemap->cache;
271 	struct drm_pagemap_shrinker *shrinker = cache->shrinker;
272 
273 	spin_lock(&shrinker->lock);
274 	if (list_empty(&dpagemap->shrink_link)) {
275 		spin_unlock(&shrinker->lock);
276 		return false;
277 	}
278 
279 	list_del_init(&dpagemap->shrink_link);
280 	atomic_dec(&shrinker->num_dpagemaps);
281 	spin_unlock(&shrinker->lock);
282 	return true;
283 }
284 
#ifdef CONFIG_PROVE_LOCKING
/**
 * drm_pagemap_shrinker_might_lock() - lockdep test for drm_pagemap_shrinker_add()
 * @dpagemap: The drm pagemap.
 *
 * The drm_pagemap_shrinker_add() function performs some locking.
 * This function can be called in code-paths that might
 * call drm_pagemap_shrinker_add() to detect any lockdep problems early.
 */
void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
{
	struct drm_pagemap_cache *cache;
	int idx;

	/* Only meaningful while the drm device is alive. */
	if (!drm_dev_enter(dpagemap->drm, &idx))
		return;

	cache = dpagemap->cache;
	if (cache)
		might_lock(&cache->shrinker->lock);

	drm_dev_exit(idx);
}
#endif
308 
309 /**
310  * drm_pagemap_shrinker_add() - Add a drm_pagemap to the shrinker list or destroy
311  * @dpagemap: The drm_pagemap.
312  *
313  * If @dpagemap is associated with a &struct drm_pagemap_cache AND the
314  * struct device backing the drm device is still alive, add @dpagemap to
315  * the &struct drm_pagemap_shrinker list of shrinkable drm_pagemaps.
316  *
317  * Otherwise destroy the pagemap directly using drm_pagemap_destroy().
318  *
319  * This is an internal function which is not intended to be exposed to drivers.
320  */
drm_pagemap_shrinker_add(struct drm_pagemap * dpagemap)321 void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
322 {
323 	struct drm_pagemap_cache *cache;
324 	struct drm_pagemap_shrinker *shrinker;
325 	int idx;
326 
327 	/*
328 	 * The pagemap cache and shrinker are disabled at
329 	 * pci device remove time. After that, dpagemaps
330 	 * are freed directly.
331 	 */
332 	if (!drm_dev_enter(dpagemap->drm, &idx))
333 		goto out_no_cache;
334 
335 	cache = dpagemap->cache;
336 	if (!cache) {
337 		drm_dev_exit(idx);
338 		goto out_no_cache;
339 	}
340 
341 	shrinker = cache->shrinker;
342 	spin_lock(&shrinker->lock);
343 	list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
344 	atomic_inc(&shrinker->num_dpagemaps);
345 	spin_unlock(&shrinker->lock);
346 	complete_all(&cache->queued);
347 	drm_dev_exit(idx);
348 	return;
349 
350 out_no_cache:
351 	drm_pagemap_destroy(dpagemap, true);
352 }
353 
354 static unsigned long
drm_pagemap_shrinker_count(struct shrinker * shrink,struct shrink_control * sc)355 drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
356 {
357 	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
358 	unsigned long count = atomic_read(&shrinker->num_dpagemaps);
359 
360 	return count ? : SHRINK_EMPTY;
361 }
362 
/*
 * Shrinker callback: destroy up to sc->nr_to_scan inactive pagemaps,
 * oldest first. The shrinker lock is dropped around the actual
 * destruction since drm_pagemap_destroy() may sleep.
 */
static unsigned long
drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
	struct drm_pagemap *dpagemap;
	struct drm_pagemap_cache *cache;
	unsigned long nr_freed = 0;

	sc->nr_scanned = 0;
	spin_lock(&shrinker->lock);
	do {
		/* Reclaim in FIFO order: the list head is the oldest entry. */
		dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
						    shrink_link);
		if (!dpagemap)
			break;

		atomic_dec(&shrinker->num_dpagemaps);
		/*
		 * list_del_init() (not list_del()) so that a concurrent
		 * drm_pagemap_shrinker_cancel() sees an empty shrink_link
		 * and reports the pagemap as already claimed.
		 */
		list_del_init(&dpagemap->shrink_link);
		spin_unlock(&shrinker->lock);

		sc->nr_scanned++;
		nr_freed++;

		/* Detach from the cache before destroying the pagemap. */
		cache = dpagemap->cache;
		spin_lock(&cache->lock);
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);

		drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
		drm_pagemap_destroy(dpagemap, true);
		spin_lock(&shrinker->lock);
	} while (sc->nr_scanned < sc->nr_to_scan);
	spin_unlock(&shrinker->lock);

	return sc->nr_scanned ? nr_freed : SHRINK_STOP;
}
399 
drm_pagemap_shrinker_fini(void * arg)400 static void drm_pagemap_shrinker_fini(void *arg)
401 {
402 	struct drm_pagemap_shrinker *shrinker = arg;
403 
404 	drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
405 	drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
406 	shrinker_free(shrinker->shrink);
407 	kfree(shrinker);
408 }
409 
410 /**
411  * drm_pagemap_shrinker_create_devm() - Create and register a pagemap shrinker
412  * @drm: The drm device
413  *
414  * Create and register a pagemap shrinker that shrinks unused pagemaps
415  * and thereby reduces memory footprint.
416  * The shrinker is drm_device managed and unregisters itself when
417  * the drm device is removed.
418  *
419  * Return: %0 on success, negative error code on failure.
420  */
drm_pagemap_shrinker_create_devm(struct drm_device * drm)421 struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
422 {
423 	struct drm_pagemap_shrinker *shrinker;
424 	struct shrinker *shrink;
425 	int err;
426 
427 	shrinker = kzalloc_obj(*shrinker);
428 	if (!shrinker)
429 		return ERR_PTR(-ENOMEM);
430 
431 	shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
432 	if (!shrink) {
433 		kfree(shrinker);
434 		return ERR_PTR(-ENOMEM);
435 	}
436 
437 	spin_lock_init(&shrinker->lock);
438 	INIT_LIST_HEAD(&shrinker->dpagemaps);
439 	shrinker->drm = drm;
440 	shrinker->shrink = shrink;
441 	shrink->count_objects = drm_pagemap_shrinker_count;
442 	shrink->scan_objects = drm_pagemap_shrinker_scan;
443 	shrink->private_data = shrinker;
444 	shrinker_register(shrink);
445 
446 	err = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
447 	if (err)
448 		return ERR_PTR(err);
449 
450 	return shrinker;
451 }
452 EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
453 
/**
 * struct drm_pagemap_owner - Device interconnect group
 * @kref: Reference count. One reference is held per peer that joined
 * the group via drm_pagemap_acquire_owner().
 *
 * A struct drm_pagemap_owner identifies a device interconnect group.
 */
struct drm_pagemap_owner {
	struct kref kref;
};
463 
drm_pagemap_owner_release(struct kref * kref)464 static void drm_pagemap_owner_release(struct kref *kref)
465 {
466 	kfree(container_of(kref, struct drm_pagemap_owner, kref));
467 }
468 
469 /**
470  * drm_pagemap_release_owner() - Stop participating in an interconnect group
471  * @peer: Pointer to the struct drm_pagemap_peer used when joining the group
472  *
473  * Stop participating in an interconnect group. This function is typically
474  * called when a pagemap is removed to indicate that it doesn't need to
475  * be taken into account.
476  */
drm_pagemap_release_owner(struct drm_pagemap_peer * peer)477 void drm_pagemap_release_owner(struct drm_pagemap_peer *peer)
478 {
479 	struct drm_pagemap_owner_list *owner_list = peer->list;
480 
481 	if (!owner_list)
482 		return;
483 
484 	mutex_lock(&owner_list->lock);
485 	list_del(&peer->link);
486 	kref_put(&peer->owner->kref, drm_pagemap_owner_release);
487 	peer->owner = NULL;
488 	mutex_unlock(&owner_list->lock);
489 }
490 EXPORT_SYMBOL(drm_pagemap_release_owner);
491 
492 /**
493  * typedef interconnect_fn - Callback function to identify fast interconnects
494  * @peer1: First endpoint.
 * @peer2: Second endpoint.
496  *
497  * The function returns %true iff @peer1 and @peer2 have a fast interconnect.
498  * Note that this is symmetrical. The function has no notion of client and provider,
499  * which may not be sufficient in some cases. However, since the callback is intended
500  * to guide in providing common pagemap owners, the notion of a common owner to
501  * indicate fast interconnects would then have to change as well.
502  *
 * Return: %true iff @peer1 and @peer2 have a fast interconnect. Otherwise %false.
504  */
505 typedef bool (*interconnect_fn)(struct drm_pagemap_peer *peer1, struct drm_pagemap_peer *peer2);
506 
/**
 * drm_pagemap_acquire_owner() - Join an interconnect group
 * @peer: A struct drm_pagemap_peer keeping track of the device interconnect
 * @owner_list: Pointer to the owner_list, keeping track of all interconnects
 * @has_interconnect: Callback function to determine whether two peers have a
 * fast local interconnect.
 *
 * Repeatedly calls @has_interconnect for @peer and other peers on @owner_list to
 * determine a set of peers for which @peer has a fast interconnect. That set will
 * have common &struct drm_pagemap_owner, and upon successful return, @peer::owner
 * will point to that struct, holding a reference, and @peer will be registered in
 * @owner_list. If @peer doesn't have any fast interconnects to other @peers, a
 * new unique &struct drm_pagemap_owner will be allocated for it, and that
 * may be shared with other peers that, at a later point, are determined to have
 * a fast interconnect with @peer.
 *
 * When @peer no longer participates in an interconnect group,
 * drm_pagemap_release_owner() should be called to drop the reference on the
 * struct drm_pagemap_owner.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
			      struct drm_pagemap_owner_list *owner_list,
			      interconnect_fn has_interconnect)
{
	struct drm_pagemap_peer *cur_peer;
	struct drm_pagemap_owner *owner = NULL;
	bool interconnect = false;

	mutex_lock(&owner_list->lock);
	/* Annotate the kmalloc below to lockdep before any early path. */
	might_alloc(GFP_KERNEL);
	/*
	 * The list keeps peers with the same owner adjacent (see the
	 * insertion below). Scan group by group: @interconnect tracks
	 * whether @peer has a fast interconnect with *every* member of
	 * the group identified by @owner.
	 */
	list_for_each_entry(cur_peer, &owner_list->peers, link) {
		if (cur_peer->owner != owner) {
			/* New group starts; stop if the previous group matched fully. */
			if (owner && interconnect)
				break;
			owner = cur_peer->owner;
			interconnect = true;
		}
		if (interconnect && !has_interconnect(peer, cur_peer))
			interconnect = false;
	}

	if (!interconnect) {
		/* No fully-matching group: give @peer a fresh owner. */
		owner = kmalloc_obj(*owner);
		if (!owner) {
			mutex_unlock(&owner_list->lock);
			return -ENOMEM;
		}
		kref_init(&owner->kref);
		list_add_tail(&peer->link, &owner_list->peers);
	} else {
		/*
		 * Append @peer to the matching group. If the loop exited via
		 * break, @cur_peer is the first entry of the *next* group, so
		 * inserting before it lands at the end of the matching group.
		 * If the loop ran to completion, &cur_peer->link equals the
		 * list head, so this appends at the list tail — also correct.
		 */
		kref_get(&owner->kref);
		list_add_tail(&peer->link, &cur_peer->link);
	}
	peer->owner = owner;
	peer->list = owner_list;
	mutex_unlock(&owner_list->lock);

	return 0;
}
EXPORT_SYMBOL(drm_pagemap_acquire_owner);
569