// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/slab.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static struct kmem_cache *fat_cache_cachep;

static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}
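
/*
 * Look up "fclus" in the inode's cluster cache.  On a hit (an exact match
 * or the nearest preceding entry), the entry is moved to the LRU head,
 * "cid" is filled in, and the cached file/disk cluster pair closest to
 * "fclus" is returned via cached_fclus/cached_dclus.  Returns the offset
 * of that pair from the start of the cached run, or -1 on a miss.
 */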
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}
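
/*
 * Search the inode's cache for an entry describing the same part of the
 * cluster chain as "new".  If one is found, its run length is extended
 * when "new" covers more clusters, and the entry is returned; otherwise
 * NULL.  Caller must hold cache_lru_lock.
 */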
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}
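
/*
 * Insert "new" into the inode's cluster cache: merge it into an existing
 * entry when one covers the same chain position, otherwise store it in a
 * newly allocated entry (up to FAT_MAX_CACHE per inode) or recycle the
 * least recently used one.  A "new" that predates a cache invalidation
 * (stale id) or a dummy cid (fcluster == -1) is ignored.
 */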
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < FAT_MAX_CACHE) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
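
/*
 * Extend the run described by "cid" by one cluster and report whether
 * "dclus" is in fact the next contiguous cluster on disk.
 */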
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}
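
/*
 * Translate file cluster number "cluster" into its position in the chain
 * (*fclus) and the corresponding on-disk cluster (*dclus), walking the FAT
 * from the nearest cached entry and caching the contiguous runs it finds.
 * Returns 0 on success, FAT_ENT_EOF if the chain ends before "cluster",
 * or a negative error code.
 */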
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (!fat_valid_entry(sbi, *dclus)) {
		fat_fs_error_ratelimit(sb,
			"%s: invalid start cluster (i_pos %lld, start %08x)",
			__func__, MSDOS_I(inode)->i_pos, *dclus);
		return -EIO;
	}
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
				"%s: detected the cluster chain loop (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				"%s: invalid cluster chain (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}
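
/*
 * Resolve file cluster "cluster" to its on-disk cluster number.  Returns 0
 * for an inode with no clusters allocated, a negative error code on
 * failure, and treats a lookup past the end of the chain as an error.
 */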
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}
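
/*
 * Map logical block "sector" of the file to a disk block number in *bmap,
 * and set *mapped_blocks to the number of blocks remaining in that cluster,
 * clamped so the mapping does not extend past "last_block".
 */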
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
			   sector_t last_block,
			   unsigned long *mapped_blocks, sector_t *bmap)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int cluster, offset;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}

	return 0;
}
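
/*
 * Check whether "sector" lies beyond the end of the file.  For writes
 * (create != 0) the in-progress allocation size, ->mmu_private, is used
 * instead of i_size.  Returns 1 if the sector is past EOF, 0 otherwise,
 * with *last_block set to the block count actually used for the check.
 */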
static int is_exceed_eof(struct inode *inode, sector_t sector,
			 sector_t *last_block, int create)
{
	struct super_block *sb = inode->i_sb;
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;

	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= *last_block) {
		if (!create)
			return 1;

		/*
		 * ->mmu_private can be accessed only on the allocation path.
		 * (caller must hold ->i_mutex)
		 */
		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= *last_block)
			return 1;
	}

	return 0;
}
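
/*
 * Map logical block "sector" of the inode to a physical block in *phys and
 * the number of contiguously mapped blocks in *mapped_blocks.  The fixed
 * root directory of FAT12/FAT16 is mapped directly; for regular files and
 * directories the request is bounded by EOF (or by i_blocks when called
 * from ->bmap) and then handed to fat_get_mapped_cluster().  A hole or
 * out-of-range sector leaves *phys == 0.
 */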
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create, bool from_bmap)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	sector_t last_block;

	*phys = 0;
	*mapped_blocks = 0;
	if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	if (!from_bmap) {
		if (is_exceed_eof(inode, sector, &last_block, create))
			return 0;
	} else {
		last_block = inode->i_blocks >>
			(inode->i_sb->s_blocksize_bits - 9);
		if (sector >= last_block)
			return 0;
	}

	return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
				      phys);
}