xref: /linux/drivers/gpu/drm/drm_pagemap_util.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
177f14f2fSThomas Hellström // SPDX-License-Identifier: GPL-2.0-only OR MIT
277f14f2fSThomas Hellström /*
377f14f2fSThomas Hellström  * Copyright © 2025 Intel Corporation
477f14f2fSThomas Hellström  */
577f14f2fSThomas Hellström 
6e44f47a9SThomas Hellström #include <linux/slab.h>
7e44f47a9SThomas Hellström 
877f14f2fSThomas Hellström #include <drm/drm_drv.h>
977f14f2fSThomas Hellström #include <drm/drm_managed.h>
1077f14f2fSThomas Hellström #include <drm/drm_pagemap.h>
1177f14f2fSThomas Hellström #include <drm/drm_pagemap_util.h>
1277f14f2fSThomas Hellström #include <drm/drm_print.h>
1377f14f2fSThomas Hellström 
/**
 * struct drm_pagemap_cache - Lookup structure for pagemaps
 *
 * Structure to keep track of active (refcount > 0) and inactive
 * (refcount == 0) pagemaps. Inactive pagemaps can be made active
 * again by waiting for the @queued completion (indicating that the
 * pagemap has been put on the @shrinker's list of shrinkable
 * pagemaps), and then successfully removing it from @shrinker's
 * list. The latter may fail if the shrinker is already in the
 * process of freeing the pagemap. A struct drm_pagemap_cache can
 * hold a single struct drm_pagemap.
 */
struct drm_pagemap_cache {
	/** @lookup_mutex: Mutex making the lookup process atomic */
	struct mutex lookup_mutex;
	/** @lock: Lock protecting the @dpagemap pointer */
	spinlock_t lock;
	/** @shrinker: Pointer to the shrinker used for this cache. Immutable. */
	struct drm_pagemap_shrinker *shrinker;
	/** @dpagemap: Non-refcounted pointer to the drm_pagemap */
	struct drm_pagemap *dpagemap;
	/**
	 * @queued: Signals when an inactive drm_pagemap has been put on
	 * @shrinker's list.
	 */
	struct completion queued;
};
4177f14f2fSThomas Hellström 
/**
 * struct drm_pagemap_shrinker - Shrinker to remove unused pagemaps
 *
 * Keeps a list of inactive (refcount == 0) drm_pagemaps and registers
 * a &struct shrinker that destroys them under memory pressure.
 */
struct drm_pagemap_shrinker {
	/** @drm: Pointer to the drm device. */
	struct drm_device *drm;
	/** @lock: Spinlock to protect the @dpagemaps list. */
	spinlock_t lock;
	/** @dpagemaps: List of unused dpagemaps. */
	struct list_head dpagemaps;
	/** @num_dpagemaps: Number of unused dpagemaps in @dpagemaps. */
	atomic_t num_dpagemaps;
	/** @shrink: Pointer to the struct shrinker. */
	struct shrinker *shrink;
};
5777f14f2fSThomas Hellström 
5877f14f2fSThomas Hellström static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
5977f14f2fSThomas Hellström 
6077f14f2fSThomas Hellström static void drm_pagemap_cache_fini(void *arg)
6177f14f2fSThomas Hellström {
6277f14f2fSThomas Hellström 	struct drm_pagemap_cache *cache = arg;
6377f14f2fSThomas Hellström 	struct drm_pagemap *dpagemap;
6477f14f2fSThomas Hellström 
6577f14f2fSThomas Hellström 	drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
6677f14f2fSThomas Hellström 	spin_lock(&cache->lock);
6777f14f2fSThomas Hellström 	dpagemap = cache->dpagemap;
6877f14f2fSThomas Hellström 	if (!dpagemap) {
6977f14f2fSThomas Hellström 		spin_unlock(&cache->lock);
7077f14f2fSThomas Hellström 		goto out;
7177f14f2fSThomas Hellström 	}
7277f14f2fSThomas Hellström 
7377f14f2fSThomas Hellström 	if (drm_pagemap_shrinker_cancel(dpagemap)) {
7477f14f2fSThomas Hellström 		cache->dpagemap = NULL;
7577f14f2fSThomas Hellström 		spin_unlock(&cache->lock);
7677f14f2fSThomas Hellström 		drm_pagemap_destroy(dpagemap, false);
7777f14f2fSThomas Hellström 	}
7877f14f2fSThomas Hellström 
7977f14f2fSThomas Hellström out:
8077f14f2fSThomas Hellström 	mutex_destroy(&cache->lookup_mutex);
8177f14f2fSThomas Hellström 	kfree(cache);
8277f14f2fSThomas Hellström }
8377f14f2fSThomas Hellström 
8477f14f2fSThomas Hellström /**
8577f14f2fSThomas Hellström  * drm_pagemap_cache_create_devm() - Create a drm_pagemap_cache
8677f14f2fSThomas Hellström  * @shrinker: Pointer to a struct drm_pagemap_shrinker.
8777f14f2fSThomas Hellström  *
8877f14f2fSThomas Hellström  * Create a device-managed drm_pagemap cache. The cache is automatically
8977f14f2fSThomas Hellström  * destroyed on struct device removal, at which point any *inactive*
9077f14f2fSThomas Hellström  * drm_pagemap's are destroyed.
9177f14f2fSThomas Hellström  *
9277f14f2fSThomas Hellström  * Return: Pointer to a struct drm_pagemap_cache on success. Error pointer
9377f14f2fSThomas Hellström  * on failure.
9477f14f2fSThomas Hellström  */
9577f14f2fSThomas Hellström struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
9677f14f2fSThomas Hellström {
97*bf4afc53SLinus Torvalds 	struct drm_pagemap_cache *cache = kzalloc_obj(*cache);
9877f14f2fSThomas Hellström 	int err;
9977f14f2fSThomas Hellström 
10077f14f2fSThomas Hellström 	if (!cache)
10177f14f2fSThomas Hellström 		return ERR_PTR(-ENOMEM);
10277f14f2fSThomas Hellström 
10377f14f2fSThomas Hellström 	mutex_init(&cache->lookup_mutex);
10477f14f2fSThomas Hellström 	spin_lock_init(&cache->lock);
10577f14f2fSThomas Hellström 	cache->shrinker = shrinker;
10677f14f2fSThomas Hellström 	init_completion(&cache->queued);
10777f14f2fSThomas Hellström 	err = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
10877f14f2fSThomas Hellström 	if (err)
10977f14f2fSThomas Hellström 		return ERR_PTR(err);
11077f14f2fSThomas Hellström 
11177f14f2fSThomas Hellström 	return cache;
11277f14f2fSThomas Hellström }
11377f14f2fSThomas Hellström EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
11477f14f2fSThomas Hellström 
11577f14f2fSThomas Hellström /**
11677f14f2fSThomas Hellström  * DOC: Cache lookup
11777f14f2fSThomas Hellström  *
11877f14f2fSThomas Hellström  * Cache lookup should be done under a locked mutex, so that a
11977f14f2fSThomas Hellström  * failed drm_pagemap_get_from_cache() and a following
 * drm_pagemap_cache_set_pagemap() are carried out as an atomic
12177f14f2fSThomas Hellström  * operation WRT other lookups. Otherwise, racing lookups may
12277f14f2fSThomas Hellström  * unnecessarily concurrently create pagemaps to fulfill a
 * failed lookup. The API provides two functions to perform this locking,
 * drm_pagemap_cache_lock_lookup() and drm_pagemap_cache_unlock_lookup(), and they
12577f14f2fSThomas Hellström  * should be used in the following way:
12677f14f2fSThomas Hellström  *
12777f14f2fSThomas Hellström  * .. code-block:: c
12877f14f2fSThomas Hellström  *
 *		drm_pagemap_cache_lock_lookup(cache);
13077f14f2fSThomas Hellström  *		dpagemap = drm_pagemap_get_from_cache(cache);
13177f14f2fSThomas Hellström  *		if (dpagemap)
13277f14f2fSThomas Hellström  *			goto out_unlock;
13377f14f2fSThomas Hellström  *
13477f14f2fSThomas Hellström  *		dpagemap = driver_create_new_dpagemap();
13577f14f2fSThomas Hellström  *		if (!IS_ERR(dpagemap))
13677f14f2fSThomas Hellström  *			drm_pagemap_cache_set_pagemap(cache, dpagemap);
13777f14f2fSThomas Hellström  *
13877f14f2fSThomas Hellström  *     out_unlock:
 *		drm_pagemap_cache_unlock_lookup(cache);
14077f14f2fSThomas Hellström  */
14177f14f2fSThomas Hellström 
/**
 * drm_pagemap_cache_lock_lookup() - Lock a drm_pagemap_cache for lookup.
 * @cache: The drm_pagemap_cache to lock.
 *
 * Takes @cache->lookup_mutex interruptibly. See the "Cache lookup" DOC
 * section for the intended lookup-then-insert usage pattern.
 *
 * Return: %-EINTR if interrupted while blocking. %0 otherwise.
 */
int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
{
	return mutex_lock_interruptible(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
15377f14f2fSThomas Hellström 
/**
 * drm_pagemap_cache_unlock_lookup() - Unlock a drm_pagemap_cache after lookup.
 * @cache: The drm_pagemap_cache to unlock.
 *
 * Releases @cache->lookup_mutex taken by a successful call to
 * drm_pagemap_cache_lock_lookup().
 */
void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
{
	mutex_unlock(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
16377f14f2fSThomas Hellström 
/**
 * drm_pagemap_get_from_cache() - Lookup of drm_pagemaps.
 * @cache: The cache used for lookup.
 *
 * If an active pagemap is present in the cache, it is immediately returned.
 * If an inactive pagemap is present, it's removed from the shrinker list and
 * an attempt is made to make it active.
 * If no pagemap present or the attempt to make it active failed, %NULL is returned
 * to indicate to the caller to create a new drm_pagemap and insert it into
 * the cache.
 *
 * Return: A reference-counted pointer to a drm_pagemap if successful. An error
 * pointer if an error occurred, or %NULL if no drm_pagemap was found and
 * the caller should insert a new one.
 */
struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
{
	struct drm_pagemap *dpagemap;
	int err;

	lockdep_assert_held(&cache->lookup_mutex);
retry:
	spin_lock(&cache->lock);
	dpagemap = cache->dpagemap;
	/* Fast path: the cached pagemap is still active; take a reference. */
	if (drm_pagemap_get_unless_zero(dpagemap)) {
		spin_unlock(&cache->lock);
		return dpagemap;
	}

	/* Empty cache: tell the caller to create and insert a new pagemap. */
	if (!dpagemap) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/*
	 * Inactive pagemap. It can only be reclaimed once it has been
	 * queued on the shrinker list (signalled via @queued), so wait
	 * for that if needed and restart the lookup from scratch.
	 */
	if (!try_wait_for_completion(&cache->queued)) {
		spin_unlock(&cache->lock);
		err = wait_for_completion_interruptible(&cache->queued);
		if (err)
			return ERR_PTR(err);
		goto retry;
	}

	if (drm_pagemap_shrinker_cancel(dpagemap)) {
		/* Removed from the shrinker list; try to revive it. */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		err = drm_pagemap_reinit(dpagemap);
		if (err) {
			drm_pagemap_destroy(dpagemap, false);
			return ERR_PTR(err);
		}
		drm_pagemap_cache_set_pagemap(cache, dpagemap);
	} else {
		/*
		 * The shrinker is already freeing the pagemap. Detach it
		 * from the cache and have the caller create a new one.
		 */
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		dpagemap = NULL;
	}

	return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache);
22477f14f2fSThomas Hellström 
22577f14f2fSThomas Hellström /**
22677f14f2fSThomas Hellström  * drm_pagemap_cache_set_pagemap() - Assign a drm_pagemap to a drm_pagemap_cache
22777f14f2fSThomas Hellström  * @cache: The cache to assign the drm_pagemap to.
22877f14f2fSThomas Hellström  * @dpagemap: The drm_pagemap to assign.
22977f14f2fSThomas Hellström  *
23077f14f2fSThomas Hellström  * The function must be called to populate a drm_pagemap_cache only
23177f14f2fSThomas Hellström  * after a call to drm_pagemap_get_from_cache() returns NULL.
23277f14f2fSThomas Hellström  */
23377f14f2fSThomas Hellström void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
23477f14f2fSThomas Hellström {
23577f14f2fSThomas Hellström 	struct drm_device *drm = dpagemap->drm;
23677f14f2fSThomas Hellström 
23777f14f2fSThomas Hellström 	lockdep_assert_held(&cache->lookup_mutex);
23877f14f2fSThomas Hellström 	spin_lock(&cache->lock);
23977f14f2fSThomas Hellström 	dpagemap->cache = cache;
24077f14f2fSThomas Hellström 	swap(cache->dpagemap, dpagemap);
24177f14f2fSThomas Hellström 	reinit_completion(&cache->queued);
24277f14f2fSThomas Hellström 	spin_unlock(&cache->lock);
24377f14f2fSThomas Hellström 	drm_WARN_ON(drm, !!dpagemap);
24477f14f2fSThomas Hellström }
24577f14f2fSThomas Hellström EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
24677f14f2fSThomas Hellström 
24777f14f2fSThomas Hellström /**
24877f14f2fSThomas Hellström  * drm_pagemap_get_from_cache_if_active() - Quick lookup of active drm_pagemaps
24977f14f2fSThomas Hellström  * @cache: The cache to lookup from.
25077f14f2fSThomas Hellström  *
25177f14f2fSThomas Hellström  * Function that should be used to lookup a drm_pagemap that is already active.
25277f14f2fSThomas Hellström  * (refcount > 0).
25377f14f2fSThomas Hellström  *
25477f14f2fSThomas Hellström  * Return: A pointer to the cache's drm_pagemap if it's active; %NULL otherwise.
25577f14f2fSThomas Hellström  */
25677f14f2fSThomas Hellström struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
25777f14f2fSThomas Hellström {
25877f14f2fSThomas Hellström 	struct drm_pagemap *dpagemap;
25977f14f2fSThomas Hellström 
26077f14f2fSThomas Hellström 	spin_lock(&cache->lock);
26177f14f2fSThomas Hellström 	dpagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
26277f14f2fSThomas Hellström 	spin_unlock(&cache->lock);
26377f14f2fSThomas Hellström 
26477f14f2fSThomas Hellström 	return dpagemap;
26577f14f2fSThomas Hellström }
26677f14f2fSThomas Hellström EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
26777f14f2fSThomas Hellström 
26877f14f2fSThomas Hellström static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
26977f14f2fSThomas Hellström {
27077f14f2fSThomas Hellström 	struct drm_pagemap_cache *cache = dpagemap->cache;
27177f14f2fSThomas Hellström 	struct drm_pagemap_shrinker *shrinker = cache->shrinker;
27277f14f2fSThomas Hellström 
27377f14f2fSThomas Hellström 	spin_lock(&shrinker->lock);
27477f14f2fSThomas Hellström 	if (list_empty(&dpagemap->shrink_link)) {
27577f14f2fSThomas Hellström 		spin_unlock(&shrinker->lock);
27677f14f2fSThomas Hellström 		return false;
27777f14f2fSThomas Hellström 	}
27877f14f2fSThomas Hellström 
27977f14f2fSThomas Hellström 	list_del_init(&dpagemap->shrink_link);
28077f14f2fSThomas Hellström 	atomic_dec(&shrinker->num_dpagemaps);
28177f14f2fSThomas Hellström 	spin_unlock(&shrinker->lock);
28277f14f2fSThomas Hellström 	return true;
28377f14f2fSThomas Hellström }
28477f14f2fSThomas Hellström 
28577f14f2fSThomas Hellström #ifdef CONFIG_PROVE_LOCKING
28677f14f2fSThomas Hellström /**
28777f14f2fSThomas Hellström  * drm_pagemap_shrinker_might_lock() - lockdep test for drm_pagemap_shrinker_add()
28877f14f2fSThomas Hellström  * @dpagemap: The drm pagemap.
28977f14f2fSThomas Hellström  *
29077f14f2fSThomas Hellström  * The drm_pagemap_shrinker_add() function performs some locking.
29177f14f2fSThomas Hellström  * This function can be called in code-paths that might
29277f14f2fSThomas Hellström  * call drm_pagemap_shrinker_add() to detect any lockdep problems early.
29377f14f2fSThomas Hellström  */
29477f14f2fSThomas Hellström void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
29577f14f2fSThomas Hellström {
29677f14f2fSThomas Hellström 	int idx;
29777f14f2fSThomas Hellström 
29877f14f2fSThomas Hellström 	if (drm_dev_enter(dpagemap->drm, &idx)) {
29977f14f2fSThomas Hellström 		struct drm_pagemap_cache *cache = dpagemap->cache;
30077f14f2fSThomas Hellström 
30177f14f2fSThomas Hellström 		if (cache)
30277f14f2fSThomas Hellström 			might_lock(&cache->shrinker->lock);
30377f14f2fSThomas Hellström 
30477f14f2fSThomas Hellström 		drm_dev_exit(idx);
30577f14f2fSThomas Hellström 	}
30677f14f2fSThomas Hellström }
30777f14f2fSThomas Hellström #endif
30877f14f2fSThomas Hellström 
30977f14f2fSThomas Hellström /**
31077f14f2fSThomas Hellström  * drm_pagemap_shrinker_add() - Add a drm_pagemap to the shrinker list or destroy
31177f14f2fSThomas Hellström  * @dpagemap: The drm_pagemap.
31277f14f2fSThomas Hellström  *
31377f14f2fSThomas Hellström  * If @dpagemap is associated with a &struct drm_pagemap_cache AND the
31477f14f2fSThomas Hellström  * struct device backing the drm device is still alive, add @dpagemap to
31577f14f2fSThomas Hellström  * the &struct drm_pagemap_shrinker list of shrinkable drm_pagemaps.
31677f14f2fSThomas Hellström  *
31777f14f2fSThomas Hellström  * Otherwise destroy the pagemap directly using drm_pagemap_destroy().
31877f14f2fSThomas Hellström  *
31977f14f2fSThomas Hellström  * This is an internal function which is not intended to be exposed to drivers.
32077f14f2fSThomas Hellström  */
32177f14f2fSThomas Hellström void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
32277f14f2fSThomas Hellström {
32377f14f2fSThomas Hellström 	struct drm_pagemap_cache *cache;
32477f14f2fSThomas Hellström 	struct drm_pagemap_shrinker *shrinker;
32577f14f2fSThomas Hellström 	int idx;
32677f14f2fSThomas Hellström 
32777f14f2fSThomas Hellström 	/*
32877f14f2fSThomas Hellström 	 * The pagemap cache and shrinker are disabled at
32977f14f2fSThomas Hellström 	 * pci device remove time. After that, dpagemaps
33077f14f2fSThomas Hellström 	 * are freed directly.
33177f14f2fSThomas Hellström 	 */
33277f14f2fSThomas Hellström 	if (!drm_dev_enter(dpagemap->drm, &idx))
33377f14f2fSThomas Hellström 		goto out_no_cache;
33477f14f2fSThomas Hellström 
33577f14f2fSThomas Hellström 	cache = dpagemap->cache;
33677f14f2fSThomas Hellström 	if (!cache) {
33777f14f2fSThomas Hellström 		drm_dev_exit(idx);
33877f14f2fSThomas Hellström 		goto out_no_cache;
33977f14f2fSThomas Hellström 	}
34077f14f2fSThomas Hellström 
34177f14f2fSThomas Hellström 	shrinker = cache->shrinker;
34277f14f2fSThomas Hellström 	spin_lock(&shrinker->lock);
34377f14f2fSThomas Hellström 	list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
34477f14f2fSThomas Hellström 	atomic_inc(&shrinker->num_dpagemaps);
34577f14f2fSThomas Hellström 	spin_unlock(&shrinker->lock);
34677f14f2fSThomas Hellström 	complete_all(&cache->queued);
34777f14f2fSThomas Hellström 	drm_dev_exit(idx);
34877f14f2fSThomas Hellström 	return;
34977f14f2fSThomas Hellström 
35077f14f2fSThomas Hellström out_no_cache:
35177f14f2fSThomas Hellström 	drm_pagemap_destroy(dpagemap, true);
35277f14f2fSThomas Hellström }
35377f14f2fSThomas Hellström 
35477f14f2fSThomas Hellström static unsigned long
35577f14f2fSThomas Hellström drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
35677f14f2fSThomas Hellström {
35777f14f2fSThomas Hellström 	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
35877f14f2fSThomas Hellström 	unsigned long count = atomic_read(&shrinker->num_dpagemaps);
35977f14f2fSThomas Hellström 
36077f14f2fSThomas Hellström 	return count ? : SHRINK_EMPTY;
36177f14f2fSThomas Hellström }
36277f14f2fSThomas Hellström 
/* Shrinker scan callback: destroy queued pagemaps until the quota is met. */
static unsigned long
drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct drm_pagemap_shrinker *shrinker = shrink->private_data;
	struct drm_pagemap *dpagemap;
	struct drm_pagemap_cache *cache;
	unsigned long nr_freed = 0;

	sc->nr_scanned = 0;
	spin_lock(&shrinker->lock);
	do {
		dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
						    shrink_link);
		if (!dpagemap)
			break;

		/* Claim the pagemap under the list lock ... */
		atomic_dec(&shrinker->num_dpagemaps);
		list_del_init(&dpagemap->shrink_link);
		/* ... then drop the lock before destroying it. */
		spin_unlock(&shrinker->lock);

		sc->nr_scanned++;
		nr_freed++;

		/* Detach from the cache so lookups see an empty cache. */
		cache = dpagemap->cache;
		spin_lock(&cache->lock);
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);

		drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
		drm_pagemap_destroy(dpagemap, true);
		spin_lock(&shrinker->lock);
	} while (sc->nr_scanned < sc->nr_to_scan);
	spin_unlock(&shrinker->lock);

	return sc->nr_scanned ? nr_freed : SHRINK_STOP;
}
39977f14f2fSThomas Hellström 
/* Devm action: unregister the shrinker and free its bookkeeping. */
static void drm_pagemap_shrinker_fini(void *arg)
{
	struct drm_pagemap_shrinker *shrinker = arg;

	drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
	/* Every queued pagemap should have been destroyed by now. */
	drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
	shrinker_free(shrinker->shrink);
	kfree(shrinker);
}
40977f14f2fSThomas Hellström 
/**
 * drm_pagemap_shrinker_create_devm() - Create and register a pagemap shrinker
 * @drm: The drm device
 *
 * Create and register a pagemap shrinker that shrinks unused pagemaps
 * and thereby reduces memory footprint.
 * The shrinker is drm_device managed and unregisters itself when
 * the drm device is removed.
 *
 * Return: Pointer to a struct drm_pagemap_shrinker on success. Error
 * pointer on failure.
 */
struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
{
	struct drm_pagemap_shrinker *shrinker;
	struct shrinker *shrink;
	int err;

	shrinker = kzalloc_obj(*shrinker);
	if (!shrinker)
		return ERR_PTR(-ENOMEM);

	shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
	if (!shrink) {
		kfree(shrinker);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&shrinker->lock);
	INIT_LIST_HEAD(&shrinker->dpagemaps);
	shrinker->drm = drm;
	shrinker->shrink = shrink;
	shrink->count_objects = drm_pagemap_shrinker_count;
	shrink->scan_objects = drm_pagemap_shrinker_scan;
	shrink->private_data = shrinker;
	shrinker_register(shrink);

	/* On failure the action runs immediately and tears everything down. */
	err = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
	if (err)
		return ERR_PTR(err);

	return shrinker;
}
EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
453e44f47a9SThomas Hellström 
/**
 * struct drm_pagemap_owner - Device interconnect group
 * @kref: Reference count.
 *
 * A struct drm_pagemap_owner identifies a device interconnect group.
 * Peers sharing the same owner are considered to have fast
 * interconnects between them.
 */
struct drm_pagemap_owner {
	struct kref kref;
};
463e44f47a9SThomas Hellström 
464e44f47a9SThomas Hellström static void drm_pagemap_owner_release(struct kref *kref)
465e44f47a9SThomas Hellström {
466e44f47a9SThomas Hellström 	kfree(container_of(kref, struct drm_pagemap_owner, kref));
467e44f47a9SThomas Hellström }
468e44f47a9SThomas Hellström 
469e44f47a9SThomas Hellström /**
470e44f47a9SThomas Hellström  * drm_pagemap_release_owner() - Stop participating in an interconnect group
471e44f47a9SThomas Hellström  * @peer: Pointer to the struct drm_pagemap_peer used when joining the group
472e44f47a9SThomas Hellström  *
473e44f47a9SThomas Hellström  * Stop participating in an interconnect group. This function is typically
474e44f47a9SThomas Hellström  * called when a pagemap is removed to indicate that it doesn't need to
475e44f47a9SThomas Hellström  * be taken into account.
476e44f47a9SThomas Hellström  */
477e44f47a9SThomas Hellström void drm_pagemap_release_owner(struct drm_pagemap_peer *peer)
478e44f47a9SThomas Hellström {
479e44f47a9SThomas Hellström 	struct drm_pagemap_owner_list *owner_list = peer->list;
480e44f47a9SThomas Hellström 
481e44f47a9SThomas Hellström 	if (!owner_list)
482e44f47a9SThomas Hellström 		return;
483e44f47a9SThomas Hellström 
484e44f47a9SThomas Hellström 	mutex_lock(&owner_list->lock);
485e44f47a9SThomas Hellström 	list_del(&peer->link);
486e44f47a9SThomas Hellström 	kref_put(&peer->owner->kref, drm_pagemap_owner_release);
487e44f47a9SThomas Hellström 	peer->owner = NULL;
488e44f47a9SThomas Hellström 	mutex_unlock(&owner_list->lock);
489e44f47a9SThomas Hellström }
490e44f47a9SThomas Hellström EXPORT_SYMBOL(drm_pagemap_release_owner);
491e44f47a9SThomas Hellström 
/**
 * typedef interconnect_fn - Callback function to identify fast interconnects
 * @peer1: First endpoint.
 * @peer2: Second endpoint.
 *
 * The function returns %true iff @peer1 and @peer2 have a fast interconnect.
 * Note that this is symmetrical. The function has no notion of client and provider,
 * which may not be sufficient in some cases. However, since the callback is intended
 * to guide in providing common pagemap owners, the notion of a common owner to
 * indicate fast interconnects would then have to change as well.
 *
 * Return: %true iff @peer1 and @peer2 have a fast interconnect. Otherwise %false.
 */
typedef bool (*interconnect_fn)(struct drm_pagemap_peer *peer1, struct drm_pagemap_peer *peer2);
506e44f47a9SThomas Hellström 
/**
 * drm_pagemap_acquire_owner() - Join an interconnect group
 * @peer: A struct drm_pagemap_peer keeping track of the device interconnect
 * @owner_list: Pointer to the owner_list, keeping track of all interconnects
 * @has_interconnect: Callback function to determine whether two peers have a
 * fast local interconnect.
 *
 * Repeatedly calls @has_interconnect for @peer and other peers on @owner_list to
 * determine a set of peers for which @peer has a fast interconnect. That set will
 * have common &struct drm_pagemap_owner, and upon successful return, @peer::owner
 * will point to that struct, holding a reference, and @peer will be registered in
 * @owner_list. If @peer doesn't have any fast interconnects to other @peers, a
 * new unique &struct drm_pagemap_owner will be allocated for it, and that
 * may be shared with other peers that, at a later point, are determined to have
 * a fast interconnect with @peer.
 *
 * When @peer no longer participates in an interconnect group,
 * drm_pagemap_release_owner() should be called to drop the reference on the
 * struct drm_pagemap_owner.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
			      struct drm_pagemap_owner_list *owner_list,
			      interconnect_fn has_interconnect)
{
	struct drm_pagemap_peer *cur_peer;
	struct drm_pagemap_owner *owner = NULL;
	bool interconnect = false;

	mutex_lock(&owner_list->lock);
	might_alloc(GFP_KERNEL);
	/*
	 * The list is kept grouped by owner: peers sharing an owner are
	 * adjacent (see the insertion paths below). Scan group by group
	 * until one is found where every peer has a fast interconnect
	 * with @peer.
	 */
	list_for_each_entry(cur_peer, &owner_list->peers, link) {
		if (cur_peer->owner != owner) {
			/* New group starts; stop if the previous one matched. */
			if (owner && interconnect)
				break;
			owner = cur_peer->owner;
			interconnect = true;
		}
		if (interconnect && !has_interconnect(peer, cur_peer))
			interconnect = false;
	}

	if (!interconnect) {
		/* No matching group: start a new one with a fresh owner. */
		owner = kmalloc_obj(*owner);
		if (!owner) {
			mutex_unlock(&owner_list->lock);
			return -ENOMEM;
		}
		kref_init(&owner->kref);
		list_add_tail(&peer->link, &owner_list->peers);
	} else {
		/*
		 * Join @owner's group. Inserting before @cur_peer keeps the
		 * list grouped: @cur_peer is either the first peer of the
		 * next group (loop broke) or, if the loop ran to completion,
		 * &cur_peer->link is the list head so this appends at the
		 * tail.
		 */
		kref_get(&owner->kref);
		list_add_tail(&peer->link, &cur_peer->link);
	}
	peer->owner = owner;
	peer->list = owner_list;
	mutex_unlock(&owner_list->lock);

	return 0;
}
EXPORT_SYMBOL(drm_pagemap_acquire_owner);
569