// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"

struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	/* A FAT12 entry is 12 bits (1.5 bytes), so byte offset = entry * 1.5 */
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block too */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			(llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
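
/*
 * FAT12 packs two 12-bit entries into three bytes. ent12_p[0]/[1] point
 * at the two bytes holding this entry: an even entry uses all of byte 0
 * plus the low nibble of byte 1; an odd entry uses the high nibble of
 * byte 0 plus all of byte 1.
 */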
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}
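
/*
 * Per-variant operation tables. FAT16 and FAT32 share the generic
 * blocknr/bread helpers; FAT12 needs its own, since a 12-bit entry can
 * straddle a block boundary.
 */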
static const struct fatent_operations fat12_ops = {
	.ent_blocknr = fat12_ent_blocknr,
	.ent_set_ptr = fat12_ent_set_ptr,
	.ent_bread = fat12_ent_bread,
	.ent_get = fat12_ent_get,
	.ent_put = fat12_ent_put,
	.ent_next = fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr = fat_ent_blocknr,
	.ent_set_ptr = fat16_ent_set_ptr,
	.ent_bread = fat_ent_bread,
	.ent_get = fat16_ent_get,
	.ent_put = fat16_ent_put,
	.ent_next = fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr = fat_ent_blocknr,
	.ent_set_ptr = fat32_ent_set_ptr,
	.ent_bread = fat_ent_bread,
	.ent_get = fat32_ent_get,
	.ent_put = fat32_ent_put,
	.ent_next = fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		/* fat12_ent_blocknr() computes the offset itself; the shift is unused */
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's cached blocks include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
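
/*
 * The FAT copies are laid out back to back, each fat_length blocks long,
 * so the mirror of a dirtied block in copy N lives at
 * b_blocknr + fat_length * N.
 */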
/* FIXME: We could write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		/* Add fatent->bhs[n] to bhs[] unless it is already there */
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);
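
				/*
				 * Remember every buffer we dirtied, so the
				 * whole batch can be synced and mirrored to
				 * the backup FATs once allocation finishes.
				 */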
				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() took a reference on the
				 * bhs, so prev_ent stays usable.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the requested free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request.
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};
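
/*
 * Readahead state for a sequential FAT scan: cur/limit track the blocks
 * actually read, while ra_next/ra_limit bound the window already submitted
 * for readahead. The window slides forward by ra_blocks each time the scan
 * reaches ra_advance.
 */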
static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so read ahead ra_pages * 2 (but try
	 * to align to the optimal hardware I/O size).
	 * [BTW, 128kb covers all FAT sectors of FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advance the window in half-size steps */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}

/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to use a bio with pages directly,
		 * to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}

int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}
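
/*
 * Issue a single discard for a run of nr_clus clusters starting at clus,
 * converting the cluster run into its underlying sector range.
 */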
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}

int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized in clusters, so trim at cluster granularity.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so they are
	 * never trimmed.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle the case where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}