xref: /linux/fs/exfat/cache.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 */

#include <linux/slab.h>
#include <linux/unaligned.h>
#include <linux/buffer_head.h>

#include "exfat_raw.h"
#include "exfat_fs.h"

#define EXFAT_MAX_CACHE		16

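/*
 * Each entry caches one contiguous extent of an inode's cluster chain:
 * file cluster 'fcluster' maps to disk cluster 'dcluster', and the run
 * extends 'nr_contig' clusters beyond that starting pair. Entries live
 * on the per-inode LRU list headed at ei->cache_lru, protected by
 * ei->cache_lru_lock.
 */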
struct exfat_cache {
	struct list_head cache_list;
	unsigned int nr_contig;	/* number of contiguous clusters */
	unsigned int fcluster;	/* cluster number in the file. */
	unsigned int dcluster;	/* cluster number on disk. */
};

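/*
 * A snapshot of a cache entry that can be held without the LRU lock.
 * 'id' records ei->cache_valid_id at snapshot time, letting
 * exfat_cache_add() detect that the inode's cache was invalidated in
 * the meantime and drop the stale snapshot.
 */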
struct exfat_cache_id {
	unsigned int id;
	unsigned int nr_contig;
	unsigned int fcluster;
	unsigned int dcluster;
};

static struct kmem_cache *exfat_cachep;

static void exfat_cache_init_once(void *c)
{
	struct exfat_cache *cache = (struct exfat_cache *)c;

	INIT_LIST_HEAD(&cache->cache_list);
}

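/*
 * Create the shared slab for cache entries. The once-per-object
 * constructor above guarantees every entry starts with an initialized,
 * empty list head; exfat_cache_free() checks that property on release.
 */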
int exfat_cache_init(void)
{
	exfat_cachep = kmem_cache_create("exfat_cache",
				sizeof(struct exfat_cache),
				0, SLAB_RECLAIM_ACCOUNT,
				exfat_cache_init_once);
	if (!exfat_cachep)
		return -ENOMEM;
	return 0;
}

void exfat_cache_shutdown(void)
{
	if (!exfat_cachep)
		return;
	kmem_cache_destroy(exfat_cachep);
}

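/*
 * GFP_NOFS: the allocation may happen while filesystem locks are held,
 * so direct reclaim must not re-enter the filesystem.
 */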
static inline struct exfat_cache *exfat_cache_alloc(void)
{
	return kmem_cache_alloc(exfat_cachep, GFP_NOFS);
}

static inline void exfat_cache_free(struct exfat_cache *cache)
{
	WARN_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(exfat_cachep, cache);
}

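/*
 * Move 'cache' to the front of the inode's LRU list unless it is
 * already there. Caller must hold ei->cache_lru_lock.
 */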
static inline void exfat_cache_update_lru(struct inode *inode,
		struct exfat_cache *cache)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);

	if (ei->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &ei->cache_lru);
}

/*
 * Find the cache entry that covers or precedes 'fclus' and return the
 * last cluster before the next cached extent (or 'end' if none follows).
 * On a hit, 'cid' receives a snapshot of the entry and *cached_fclus /
 * *cached_dclus are advanced to the cached position closest to 'fclus'.
 */
static inline unsigned int
exfat_cache_lookup(struct inode *inode, struct exfat_cache_id *cid,
		unsigned int fclus, unsigned int end,
		unsigned int *cached_fclus, unsigned int *cached_dclus)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	static struct exfat_cache nohit = { .fcluster = 0, };
	struct exfat_cache *hit = &nohit, *p;
	unsigned int tail = 0;		/* End boundary of hit cache */

	/*
	 * Search the range [fclus, end]. Stop early if:
	 * 1. the hit covers the entire range, or
	 * 2. the next cached extent starts at or before the hit's tail.
	 */
	spin_lock(&ei->cache_lru_lock);
	list_for_each_entry(p, &ei->cache_lru, cache_list) {
		/* Find the entry covering "fclus", or the nearest one before it. */
		if (p->fcluster <= fclus) {
			if (p->fcluster < hit->fcluster)
				continue;

			hit = p;
			tail = hit->fcluster + hit->nr_contig;

			/* Current cache covers [fclus, end] completely */
			if (tail >= end)
				break;
		} else if (p->fcluster <= end) {
			end = p->fcluster - 1;

			/*
			 * If we already have a hit and the next cached
			 * extent starts at or before its tail, the two are
			 * contiguous; stop searching.
			 */
			if (tail && tail >= end)
				break;
		}
	}
	if (hit != &nohit) {
		unsigned int offset;

		exfat_cache_update_lru(inode, hit);
		cid->id = ei->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;

		offset = min(cid->nr_contig, fclus - cid->fcluster);
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&ei->cache_lru_lock);

	/* Return 'end', trimmed to just before the next cached extent, if any. */
	return end;
}

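/*
 * Merge 'new' into an existing entry that starts at the same file
 * cluster, widening that entry if 'new' covers a longer run. Returns
 * the merged entry, or NULL if none matches. Caller must hold
 * ei->cache_lru_lock.
 */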
static struct exfat_cache *exfat_cache_merge(struct inode *inode,
		struct exfat_cache_id *new)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *p;

	list_for_each_entry(p, &ei->cache_lru, cache_list) {
		/* Find the entry starting at the same point of the chain as "new". */
		if (p->fcluster == new->fcluster) {
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

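/*
 * Insert the snapshot 'new' into the inode's cache. The LRU lock is
 * dropped around the slab allocation (which may sleep), so the merge is
 * retried once the lock is reacquired. If the inode already holds
 * EXFAT_MAX_CACHE entries, the least recently used entry at the tail of
 * the LRU list is recycled instead.
 */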
static void exfat_cache_add(struct inode *inode,
		struct exfat_cache_id *new)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *cache, *tmp;

	if (new->fcluster == EXFAT_EOF_CLUSTER) /* dummy cache */
		return;

	spin_lock(&ei->cache_lru_lock);
	if (new->id != EXFAT_CACHE_VALID &&
	    new->id != ei->cache_valid_id)
		goto unlock;	/* this cache was invalidated */

	cache = exfat_cache_merge(inode, new);
	if (cache == NULL) {
		if (ei->nr_caches < EXFAT_MAX_CACHE) {
			ei->nr_caches++;
			spin_unlock(&ei->cache_lru_lock);

			tmp = exfat_cache_alloc();
			if (!tmp) {
				spin_lock(&ei->cache_lru_lock);
				ei->nr_caches--;
				spin_unlock(&ei->cache_lru_lock);
				return;
			}

			spin_lock(&ei->cache_lru_lock);
			cache = exfat_cache_merge(inode, new);
			if (cache != NULL) {
				ei->nr_caches--;
				exfat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = ei->cache_lru.prev;

			cache = list_entry(p,
					struct exfat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	exfat_cache_update_lru(inode, cache);
unlock:
	spin_unlock(&ei->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, so the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __exfat_cache_inval_inode(struct inode *inode)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct exfat_cache *cache;

	while (!list_empty(&ei->cache_lru)) {
		cache = list_entry(ei->cache_lru.next,
				   struct exfat_cache, cache_list);
		list_del_init(&cache->cache_list);
		ei->nr_caches--;
		exfat_cache_free(cache);
	}
	/*
	 * Bump the validity id; snapshots stamped with an older id are now
	 * stale and will be discarded. Skip the reserved EXFAT_CACHE_VALID
	 * value on wraparound.
	 */
	ei->cache_valid_id++;
	if (ei->cache_valid_id == EXFAT_CACHE_VALID)
		ei->cache_valid_id++;
}

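/* Drop every cached extent of 'inode' under the LRU lock. */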
void exfat_cache_inval_inode(struct inode *inode)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);

	spin_lock(&ei->cache_lru_lock);
	__exfat_cache_inval_inode(inode);
	spin_unlock(&ei->cache_lru_lock);
}

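/*
 * Grow the snapshot by one cluster and report whether 'dclus' is the
 * expected next disk cluster of the contiguous run.
 */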
static inline int cache_contiguous(struct exfat_cache_id *cid,
		unsigned int dclus)
{
	cid->nr_contig++;
	return cid->dcluster + cid->nr_contig == dclus;
}

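/*
 * Begin a fresh snapshot mapping file cluster 'fclus' to disk
 * cluster 'dclus'.
 */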
static inline void cache_init(struct exfat_cache_id *cid,
		unsigned int fclus, unsigned int dclus)
{
	cid->id = EXFAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

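/*
 * Map file cluster 'cluster' of 'inode' onto the disk. On success,
 * *dclus holds the corresponding disk cluster, *count is shrunk to the
 * number of contiguous clusters actually found (0 if 'cluster' lies
 * beyond the end of the chain), and *last_dclus tracks the disk cluster
 * visited just before *dclus while walking the chain.
 */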
int exfat_get_cluster(struct inode *inode, unsigned int cluster,
		unsigned int *dclus, unsigned int *count,
		unsigned int *last_dclus)
{
	struct super_block *sb = inode->i_sb;
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct buffer_head *bh = NULL;
	struct exfat_cache_id cid;
	unsigned int content, fclus;
	unsigned int end = cluster + *count - 1;

	if (ei->start_clu == EXFAT_FREE_CLUSTER) {
		exfat_fs_error(sb,
			"invalid access to exfat cache (entry 0x%08x)",
			ei->start_clu);
		return -EIO;
	}

	fclus = 0;
	*dclus = ei->start_clu;
	*last_dclus = *dclus;

	/*
	 * This case should not occur, since exfat_map_cluster() does not
	 * call this routine when start_clu == EXFAT_EOF_CLUSTER. It is
	 * kept for completeness.
	 */
	if (*dclus == EXFAT_EOF_CLUSTER) {
		*count = 0;
		return 0;
	}

	/* If only the first cluster is needed, return now. */
	if (fclus == cluster && *count == 1)
		return 0;

	cache_init(&cid, fclus, *dclus);
	/*
	 * Trim 'end' to exclude the next cached extent, since clusters in
	 * different cache entries are typically not contiguous.
	 */
	end = exfat_cache_lookup(inode, &cid, cluster, end, &fclus, dclus);

	/* Return if the cache covers the entire range. */
	if (cid.fcluster + cid.nr_contig >= end) {
		*count = end - cluster + 1;
		return 0;
	}

	/* Walk the FAT chain to the first cluster we need. */
	while (fclus < cluster) {
		if (exfat_ent_get(sb, *dclus, &content, &bh))
			return -EIO;

		*last_dclus = *dclus;
		*dclus = content;
		fclus++;

		if (content == EXFAT_EOF_CLUSTER)
			break;

		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, fclus, *dclus);
	}

	/*
	 * Now the cid cache contains the first cluster requested; collect
	 * the remaining clusters of this contiguous extent.
	 */
	if (*dclus != EXFAT_EOF_CLUSTER) {
		unsigned int clu = *dclus;

		while (fclus < end) {
			if (exfat_ent_get(sb, clu, &content, &bh))
				return -EIO;
			if (++clu != content)
				break;
			fclus++;
		}
		cid.nr_contig = fclus - cid.fcluster;
		*count = fclus - cluster + 1;

		/*
		 * The chain continues but jumps here; cache the extent
		 * found so far, as we'll definitely need it later, and
		 * start a new snapshot at the jump target.
		 */
		if (fclus < end && content != EXFAT_EOF_CLUSTER) {
			exfat_cache_add(inode, &cid);
			cache_init(&cid, fclus + 1, content);
		}
	} else {
		*count = 0;
	}
	brelse(bh);
	exfat_cache_add(inode, &cid);
	return 0;
}