/* xref: /linux/fs/btrfs/lru_cache.h (revision 78c3925c048c752334873f56c3a3d1c9d53e0416) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef BTRFS_LRU_CACHE_H
4 #define BTRFS_LRU_CACHE_H
5 
6 #include <linux/types.h>
7 #include <linux/maple_tree.h>
8 #include <linux/list.h>
9 #include "lru_cache.h"
10 
11 /*
12  * A cache entry. This is meant to be embedded in a structure of a user of
13  * this module. Similar to how struct list_head and struct rb_node are used.
14  *
15  * Note: it should be embedded as the first element in a struct (offset 0), and
16  * this module assumes it was allocated with kmalloc(), so it calls kfree() when
17  * it needs to free an entry.
18  */
struct btrfs_lru_cache_entry {
	/* Node in the cache's LRU list (struct btrfs_lru_cache::lru_list). */
	struct list_head lru_list;
	/* The full 64 bits key this entry is indexed by. */
	u64 key;
	/*
	 * Optional generation associated to a key. Use 0 if not needed/used.
	 * Entries with the same key and different generations are stored in a
	 * linked list, so use this only for cases where there's a small number
	 * of different generations.
	 */
	u64 gen;
	/*
	 * The maple tree uses unsigned long type for the keys, which is 32 bits
	 * on 32 bits systems, and 64 bits on 64 bits systems. So if we want to
	 * use something like inode numbers as keys, which are always a u64, we
	 * have to deal with this in a special way - we store the key in the
	 * entry itself, as a u64, and the values inserted into the maple tree
	 * are linked lists of entries - so in case we are on a 64 bits system,
	 * that list always has a single entry, while on 32 bits systems it
	 * may have more than one, with each entry having the same value for
	 * their lower 32 bits of the u64 key.
	 */
	struct list_head list;
};
42 
struct btrfs_lru_cache {
	/*
	 * Head of the LRU list of entries. The first entry is the least
	 * recently used one (see btrfs_lru_cache_lru_entry() below).
	 */
	struct list_head lru_list;
	/*
	 * Maps a key to a list of entries - see the comment on
	 * struct btrfs_lru_cache_entry::list for why values are lists.
	 */
	struct maple_tree entries;
	/* Number of entries stored in the cache. */
	unsigned int size;
	/* Maximum number of entries the cache can have. */
	unsigned int max_size;
};
51 
/*
 * Iterate over all entries in the cache, from the most recently used to the
 * least recently used one (the _reverse list variant starts at the list tail,
 * and the head entry is the LRU one - see btrfs_lru_cache_lru_entry()).
 * Safe against removal of the current entry while iterating, since the next
 * position is kept in @tmp.
 */
#define btrfs_lru_cache_for_each_entry_safe(cache, entry, tmp)		\
	list_for_each_entry_safe_reverse((entry), (tmp), &(cache)->lru_list, lru_list)
54 
55 static inline struct btrfs_lru_cache_entry *btrfs_lru_cache_lru_entry(
56 					      struct btrfs_lru_cache *cache)
57 {
58 	return list_first_entry_or_null(&cache->lru_list,
59 					struct btrfs_lru_cache_entry, lru_list);
60 }
61 
/* Initialize @cache as empty, allowing at most @max_size entries. */
void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size);

/*
 * Find the entry with the given @key and @gen, or return NULL if there is
 * none. NOTE(review): presumably a successful lookup also promotes the entry
 * to most recently used - confirm against the implementation in lru_cache.c.
 */
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
						     u64 key, u64 gen);

/*
 * Store @new_entry (with its key and gen fields already set) in the cache,
 * using @gfp for any internal allocations. Returns 0 on success, and
 * presumably a negative errno on failure (verify in lru_cache.c). The entry
 * must have been allocated with kmalloc(), as the cache calls kfree() when it
 * needs to free entries (see the comment on struct btrfs_lru_cache_entry).
 */
int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
			  struct btrfs_lru_cache_entry *new_entry,
			  gfp_t gfp);

/*
 * Remove @entry from @cache. Per the ownership note on
 * struct btrfs_lru_cache_entry, the cache frees entries with kfree().
 */
void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
			    struct btrfs_lru_cache_entry *entry);

/* Remove and free all entries, leaving @cache empty. */
void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache);
71 
72 #endif
73