// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS directory entry operations
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Modified for NILFS by Amagai Yoshiji.
 */
/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include <linux/pagemap.h>
#include "nilfs.h"
#include "page.h"

static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
{
	unsigned int len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == NILFS_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(NILFS_MAX_REC_LEN);

	BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}

/*
 * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have.
 */
static inline unsigned int nilfs_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned int last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
			       unsigned int to)
{
	loff_t pos = folio_pos(folio) + from;

	return __block_write_begin(&folio->page, pos, to - from,
				   nilfs_get_block);
}

static void nilfs_commit_chunk(struct folio *folio,
		struct address_space *mapping, size_t from, size_t to)
{
	struct inode *dir = mapping->host;
	loff_t pos = folio_pos(folio) + from;
	size_t copied, len = to - from;
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
	copied = block_write_end(NULL, mapping, pos, len, len,
				 &folio->page, NULL);
	if (pos + copied > dir->i_size)
		i_size_write(dir, pos + copied);
	if (IS_DIRSYNC(dir))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	err = nilfs_set_file_dirty(dir, nr_dirty);
	WARN_ON(err); /* should never happen */
	folio_unlock(folio);
}

static bool nilfs_check_folio(struct folio *folio, char *kaddr)
{
	struct inode *dir = folio->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned int chunk_size = nilfs_chunk_size(dir);
	size_t offs, rec_len;
	size_t limit = folio_size(folio);
	struct nilfs_dir_entry *p;
	char *error;

	if (dir->i_size < folio_pos(folio) + limit) {
		limit = dir->i_size - folio_pos(folio);
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct nilfs_dir_entry *)(kaddr + offs);
		rec_len = nilfs_rec_len_from_disk(p->rec_len);

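		/*
		 * Validate the on-disk record, in order: it must be at
		 * least as large as an empty entry, 4-byte aligned, big
		 * enough to hold its own name, and it must not cross a
		 * chunk (block) boundary.
		 */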
		if (rec_len < NILFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size - 1))
			goto Espan;
	}
	if (offs != limit)
		goto Eend;
out:
	folio_set_checked(folio);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	nilfs_error(sb,
		    "size of directory #%lu is not a multiple of chunk size",
		    dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
bad_entry:
	nilfs_error(sb,
		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
		    dir->i_ino, error, (folio->index << PAGE_SHIFT) + offs,
		    (unsigned long)le64_to_cpu(p->inode),
		    rec_len, p->name_len);
	goto fail;
Eend:
	p = (struct nilfs_dir_entry *)(kaddr + offs);
	nilfs_error(sb,
		    "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu",
		    dir->i_ino, (folio->index << PAGE_SHIFT) + offs,
		    (unsigned long)le64_to_cpu(p->inode));
fail:
	folio_set_error(folio);
	return false;
}

static void *nilfs_get_folio(struct inode *dir, unsigned long n,
			     struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct folio *folio = read_mapping_folio(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(folio))
		return folio;

	kaddr = kmap_local_folio(folio, 0);
	if (unlikely(!folio_test_checked(folio))) {
		if (!nilfs_check_folio(folio, kaddr))
			goto fail;
	}

	*foliop = folio;
	return kaddr;

fail:
	folio_release_kmap(folio, kaddr);
	return ERR_PTR(-EIO);
}

/*
 * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
 *
 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
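 *
 * An entry whose inode field is zero is a hole left by a previous
 * deletion; it never matches, whatever its name bytes contain.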
 */
static int
nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
{
	return (struct nilfs_dir_entry *)((char *)p +
					  nilfs_rec_len_from_disk(p->rec_len));
}

static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};

#define S_SHIFT 12
static unsigned char
nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};

static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
	umode_t mode = inode->i_mode;

	de->file_type = nilfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}

static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);

	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct nilfs_dir_entry *de;
		struct folio *folio;

		kaddr = nilfs_get_folio(inode, n, &folio);
		if (IS_ERR(kaddr)) {
			nilfs_error(sb, "bad page in #%lu", inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
		}
		de = (struct nilfs_dir_entry *)(kaddr + offset);
		limit = kaddr + nilfs_last_byte(inode, n) -
			NILFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
			if (de->rec_len == 0) {
				nilfs_error(sb, "zero-length directory entry");
				folio_release_kmap(folio, kaddr);
				return -EIO;
			}
			if (de->inode) {
				unsigned char t;

				if (de->file_type < NILFS_FT_MAX)
					t = nilfs_filetype_table[de->file_type];
				else
					t = DT_UNKNOWN;

				if (!dir_emit(ctx, de->name, de->name_len,
						le64_to_cpu(de->inode), t)) {
					folio_release_kmap(folio, kaddr);
					return 0;
				}
			}
			ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}

/*
 * nilfs_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name. It
 * returns the folio in which the entry was found, and the entry itself.
 * The folio is mapped and unlocked.  When the caller is finished with
 * the entry, it should call folio_release_kmap().
 *
 * On failure, returns NULL and the caller should ignore foliop.
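 *
 * The scan begins at the folio recorded in i_dir_start_lookup and
 * wraps around, so repeated lookups in the same directory tend to
 * start at the folio that satisfied the previous one.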
 */
struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
		const struct qstr *qstr, struct folio **foliop)
{
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct nilfs_inode_info *ei = NILFS_I(dir);
	struct nilfs_dir_entry *de;

	if (npages == 0)
		goto out;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = nilfs_get_folio(dir, n, foliop);

		if (!IS_ERR(kaddr)) {
			de = (struct nilfs_dir_entry *)kaddr;
			kaddr += nilfs_last_byte(dir, n) - reclen;
			while ((char *)de <= kaddr) {
				if (de->rec_len == 0) {
					nilfs_error(dir->i_sb,
						"zero-length directory entry");
					folio_release_kmap(*foliop, kaddr);
					goto out;
				}
				if (nilfs_match(namelen, name, de))
					goto found;
				de = nilfs_next_entry(de);
			}
			folio_release_kmap(*foliop, kaddr);
		}
		if (++n >= npages)
			n = 0;
		/* next folio is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			nilfs_error(dir->i_sb,
				    "dir %lu size %lld exceeds block count %llu",
				    dir->i_ino, dir->i_size,
				    (unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return NULL;

found:
	ei->i_dir_start_lookup = n;
	return de;
}

struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
{
	struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);

	if (IS_ERR(de))
		return NULL;
	return nilfs_next_entry(de);
}

ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct nilfs_dir_entry *de;
	struct folio *folio;

	de = nilfs_find_entry(dir, qstr, &folio);
	if (de) {
		res = le64_to_cpu(de->inode);
		folio_release_kmap(folio, de);
	}
	return res;
}

void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
		    struct folio *folio, struct inode *inode)
{
	size_t from = offset_in_folio(folio, de);
	size_t to = from + nilfs_rec_len_from_disk(de->rec_len);
	struct address_space *mapping = folio->mapping;
	int err;

	folio_lock(folio);
	err = nilfs_prepare_chunk(folio, from, to);
	BUG_ON(err);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(folio, mapping, from, to);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}

/*
 * Parent is locked.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned int chunk_size = nilfs_chunk_size(dir);
	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct folio *folio = NULL;
	struct nilfs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	size_t from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
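	 *
	 * A usable slot is either an unused entry (inode == 0) whose
	 * record is large enough for the new name, or a live entry
	 * whose rec_len leaves enough slack beyond its own name to be
	 * split in two.  If no slot exists, the chunk starting at
	 * i_size is claimed as one big empty entry.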
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr = nilfs_get_folio(dir, n, &folio);
		char *dir_end;

		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + nilfs_last_byte(dir, n);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = nilfs_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb,
					    "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (nilfs_match(namelen, name, de))
				goto out_unlock;
			name_len = NILFS_DIR_REC_LEN(de->name_len);
			rec_len = nilfs_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct nilfs_dir_entry *)((char *)de + rec_len);
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	from = offset_in_folio(folio, de);
	to = from + rec_len;
	err = nilfs_prepare_chunk(folio, from, to);
	if (err)
		goto out_unlock;
	if (de->inode) {
		struct nilfs_dir_entry *de1;

		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
		de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
		de->rec_len = nilfs_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(folio, folio->mapping, from, to);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	nilfs_mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}

/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Folio is up-to-date.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = (char *)((unsigned long)dir & ~(folio_size(folio) - 1));
	size_t from, to;
	struct nilfs_dir_entry *de, *pde = NULL;
	int err;

	from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
	to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len);
	de = (struct nilfs_dir_entry *)(kaddr + from);

	while ((char *)de < (char *)dir) {
		if (de->rec_len == 0) {
			nilfs_error(inode->i_sb,
				    "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = nilfs_next_entry(de);
	}
	if (pde)
		from = (char *)pde - kaddr;
	folio_lock(folio);
	err = nilfs_prepare_chunk(folio, from, to);
	BUG_ON(err);
	if (pde)
		pde->rec_len = nilfs_rec_len_to_disk(to - from);
	dir->inode = 0;
	nilfs_commit_chunk(folio, mapping, from, to);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
out:
	return err;
}

/*
 * Set the first fragment of the directory.
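 *
 * The first chunk holds exactly two entries: "." pointing back at the
 * directory itself and ".." pointing at the parent, with ".."
 * absorbing all space remaining in the chunk.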
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = filemap_grab_folio(mapping, 0);
	unsigned int chunk_size = nilfs_chunk_size(inode);
	struct nilfs_dir_entry *de;
	int err;
	void *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = nilfs_prepare_chunk(folio, 0, chunk_size);
	if (unlikely(err)) {
		folio_unlock(folio);
		goto fail;
	}
	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, chunk_size);
	de = (struct nilfs_dir_entry *)kaddr;
	de->name_len = 1;
	de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
	memcpy(de->name, ".\0\0", 4);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);

	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
	de->inode = cpu_to_le64(parent->i_ino);
	memcpy(de->name, "..\0", 4);
	nilfs_set_de_type(de, inode);
	kunmap_local(kaddr);
	nilfs_commit_chunk(folio, mapping, 0, chunk_size);
fail:
	folio_put(folio);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int nilfs_empty_dir(struct inode *inode)
{
	struct folio *folio = NULL;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		struct nilfs_dir_entry *de;

		kaddr = nilfs_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr)) {
			/*
			 * Do not skip an unreadable folio: treat a read
			 * error as "not empty", otherwise a corrupted
			 * directory could be judged empty and removed
			 * while it still holds live entries.
			 */
			return 0;
		}

		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(inode->i_sb,
					    "zero-length directory entry (kaddr=%p, de=%p)",
					    kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le64(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = nilfs_next_entry(de);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}

const struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,
};