// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/ufs_dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" in May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/swap.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
                const unsigned char *name, struct ufs_dir_entry *de)
{
        if (len != ufs_get_de_namlen(sb, de))
                return 0;
        if (!de->d_ino)
                return 0;
        return !memcmp(name, de->d_name, len);
}

static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
        struct address_space *mapping = folio->mapping;
        struct inode *dir = mapping->host;

        inode_inc_iversion(dir);
        block_write_end(pos, len, len, folio);
        if (pos + len > dir->i_size) {
                i_size_write(dir, pos + len);
                mark_inode_dirty(dir);
        }
        folio_unlock(folio);
}

static int ufs_handle_dirsync(struct inode *dir)
{
        int err;

        err = filemap_write_and_wait(dir->i_mapping);
        if (!err)
                err = sync_inode_metadata(dir, 1);
        return err;
}
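/*
 * The two helpers above back every directory update below.  Roughly, the
 * sequence is (see ufs_set_link(), ufs_add_link() and ufs_delete_entry()
 * for the real call sites and their error handling):
 *
 *      folio_lock(folio);
 *      err = ufs_prepare_chunk(folio, pos, len);
 *      ... modify the entry in place ...
 *      ufs_commit_chunk(folio, pos, len);
 *      err = ufs_handle_dirsync(dir);
 *
 * ufs_commit_chunk() bumps the directory's i_version, extends i_size when
 * the write went past it and unlocks the folio; ufs_handle_dirsync() then
 * writes back the directory's data and inode metadata.
 */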
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
        ino_t res = 0;
        struct ufs_dir_entry *de;
        struct folio *folio;

        de = ufs_find_entry(dir, qstr, &folio);
        if (de) {
                res = fs32_to_cpu(dir->i_sb, de->d_ino);
                folio_release_kmap(folio, de);
        }
        return res;
}

int ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
                 struct folio *folio, struct inode *inode,
                 bool update_times)
{
        loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
        unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
        int err;

        folio_lock(folio);
        err = ufs_prepare_chunk(folio, pos, len);
        if (unlikely(err)) {
                folio_unlock(folio);
                return err;
        }

        de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
        ufs_set_de_type(dir->i_sb, de, inode->i_mode);

        ufs_commit_chunk(folio, pos, len);
        if (update_times)
                inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
        mark_inode_dirty(dir);
        return ufs_handle_dirsync(dir);
}

static bool ufs_check_folio(struct folio *folio, char *kaddr)
{
        struct inode *dir = folio->mapping->host;
        struct super_block *sb = dir->i_sb;
        unsigned offs, rec_len;
        unsigned limit = folio_size(folio);
        const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
        struct ufs_dir_entry *p;
        char *error;

        if (dir->i_size < folio_pos(folio) + limit) {
                limit = offset_in_folio(folio, dir->i_size);
                if (limit & chunk_mask)
                        goto Ebadsize;
                if (!limit)
                        goto out;
        }
        for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
                p = (struct ufs_dir_entry *)(kaddr + offs);
                rec_len = fs16_to_cpu(sb, p->d_reclen);

                if (rec_len < UFS_DIR_REC_LEN(1))
                        goto Eshort;
                if (rec_len & 3)
                        goto Ealign;
                if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
                        goto Enamelen;
                if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
                        goto Espan;
                if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
                                                 UFS_SB(sb)->s_uspi->s_ncg))
                        goto Einumber;
        }
        if (offs != limit)
                goto Eend;
out:
        folio_set_checked(folio);
        return true;

        /* Too bad, we had an error */

Ebadsize:
        ufs_error(sb, __func__,
                  "size of directory #%lu is not a multiple of chunk size",
                  dir->i_ino
        );
        goto fail;
Eshort:
        error = "rec_len is smaller than minimal";
        goto bad_entry;
Ealign:
        error = "unaligned directory entry";
        goto bad_entry;
Enamelen:
        error = "rec_len is too small for name_len";
        goto bad_entry;
Espan:
        error = "directory entry across blocks";
        goto bad_entry;
Einumber:
        error = "inode out of bounds";
bad_entry:
        ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
                  "offset=%llu, rec_len=%d, name_len=%d",
                  dir->i_ino, error, folio_pos(folio) + offs,
                  rec_len, ufs_get_de_namlen(sb, p));
        goto fail;
Eend:
        p = (struct ufs_dir_entry *)(kaddr + offs);
        ufs_error(sb, __func__,
                  "entry in directory #%lu spans the page boundary "
                  "offset=%llu",
                  dir->i_ino, folio_pos(folio) + offs);
fail:
        return false;
}

static void *ufs_get_folio(struct inode *dir, unsigned long n,
                           struct folio **foliop)
{
        struct address_space *mapping = dir->i_mapping;
        struct folio *folio = read_mapping_folio(mapping, n, NULL);
        void *kaddr;

        if (IS_ERR(folio))
                return ERR_CAST(folio);
        kaddr = kmap_local_folio(folio, 0);
        if (unlikely(!folio_test_checked(folio))) {
                if (!ufs_check_folio(folio, kaddr))
                        goto fail;
        }
        *foliop = folio;
        return kaddr;

fail:
        folio_release_kmap(folio, kaddr);
        return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
        unsigned last_byte = inode->i_size;

        last_byte -= page_nr << PAGE_SHIFT;
        if (last_byte > PAGE_SIZE)
                last_byte = PAGE_SIZE;
        return last_byte;
}
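/*
 * For example, assuming 4 KiB pages and i_size == 5000: ufs_last_byte()
 * returns 4096 for page 0 and 5000 - 4096 = 904 for page 1, so a scan of
 * page 1 stops after the last byte that is actually inside i_size.
 */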
static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
        return (struct ufs_dir_entry *)((char *)p +
                                        fs16_to_cpu(sb, p->d_reclen));
}

struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop)
{
        struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop);

        if (!IS_ERR(de))
                return ufs_next_entry(dir->i_sb, de);

        return NULL;
}

/*
 *	ufs_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name.  It
 * returns the folio in which the entry was found (through @foliop) and
 * the entry itself.  The folio is returned kmapped and unlocked.
 * The entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
                                     struct folio **foliop)
{
        struct super_block *sb = dir->i_sb;
        const unsigned char *name = qstr->name;
        int namelen = qstr->len;
        unsigned reclen = UFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
        unsigned long npages = dir_pages(dir);
        struct ufs_inode_info *ui = UFS_I(dir);
        struct ufs_dir_entry *de;

        UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

        if (npages == 0 || namelen > UFS_MAXNAMLEN)
                goto out;

        start = ui->i_dir_start_lookup;

        if (start >= npages)
                start = 0;
        n = start;
        do {
                char *kaddr = ufs_get_folio(dir, n, foliop);

                if (!IS_ERR(kaddr)) {
                        de = (struct ufs_dir_entry *)kaddr;
                        kaddr += ufs_last_byte(dir, n) - reclen;
                        while ((char *)de <= kaddr) {
                                if (ufs_match(sb, namelen, name, de))
                                        goto found;
                                de = ufs_next_entry(sb, de);
                        }
                        folio_release_kmap(*foliop, kaddr);
                }
                if (++n >= npages)
                        n = 0;
        } while (n != start);
out:
        return NULL;

found:
        ui->i_dir_start_lookup = n;
        return de;
}
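/*
 * ufs_add_link() below finds a free slot by walking every entry in the
 * directory: a usable slot is either an unused entry (d_ino == 0) whose
 * d_reclen is already large enough, or a live entry whose d_reclen leaves
 * enough slack behind its own name to be split in two.  If the scan
 * reaches i_size without finding one, a fresh chunk of s_dirblksize bytes
 * is started instead.
 */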
/*
 * Parent is locked.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = d_inode(dentry->d_parent);
        const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct super_block *sb = dir->i_sb;
        unsigned reclen = UFS_DIR_REC_LEN(namelen);
        const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
        unsigned short rec_len, name_len;
        struct folio *folio = NULL;
        struct ufs_dir_entry *de;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        loff_t pos;
        int err;

        UFSD("ENTER, name %s, namelen %u\n", name, namelen);

        /*
         * We take care of directory expansion in the same loop.
         * This code plays outside i_size, so it locks the folio
         * to protect that region.
         */
        for (n = 0; n <= npages; n++) {
                char *kaddr = ufs_get_folio(dir, n, &folio);
                char *dir_end;

                if (IS_ERR(kaddr))
                        return PTR_ERR(kaddr);
                folio_lock(folio);
                dir_end = kaddr + ufs_last_byte(dir, n);
                de = (struct ufs_dir_entry *)kaddr;
                kaddr += folio_size(folio) - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
                                name_len = 0;
                                rec_len = chunk_size;
                                de->d_reclen = cpu_to_fs16(sb, chunk_size);
                                de->d_ino = 0;
                                goto got_it;
                        }
                        if (de->d_reclen == 0) {
                                ufs_error(dir->i_sb, __func__,
                                          "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
                        }
                        err = -EEXIST;
                        if (ufs_match(sb, namelen, name, de))
                                goto out_unlock;
                        name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
                        rec_len = fs16_to_cpu(sb, de->d_reclen);
                        if (!de->d_ino && rec_len >= reclen)
                                goto got_it;
                        if (rec_len >= name_len + reclen)
                                goto got_it;
                        de = (struct ufs_dir_entry *)((char *)de + rec_len);
                }
                folio_unlock(folio);
                folio_release_kmap(folio, kaddr);
        }
        BUG();
        return -EINVAL;

got_it:
        pos = folio_pos(folio) + offset_in_folio(folio, de);
        err = ufs_prepare_chunk(folio, pos, rec_len);
        if (err)
                goto out_unlock;
        if (de->d_ino) {
                struct ufs_dir_entry *de1 =
                        (struct ufs_dir_entry *)((char *)de + name_len);
                de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
                de->d_reclen = cpu_to_fs16(sb, name_len);

                de = de1;
        }

        ufs_set_de_namlen(sb, de, namelen);
        memcpy(de->d_name, name, namelen + 1);
        de->d_ino = cpu_to_fs32(sb, inode->i_ino);
        ufs_set_de_type(sb, de, inode->i_mode);

        ufs_commit_chunk(folio, pos, rec_len);
        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

        mark_inode_dirty(dir);
        err = ufs_handle_dirsync(dir);
        /* OFFSET_CACHE */
out_put:
        folio_release_kmap(folio, de);
        return err;
out_unlock:
        folio_unlock(folio);
        goto out_put;
}

static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
                   unsigned offset, unsigned mask)
{
        struct ufs_dir_entry *de = (struct ufs_dir_entry *)(base + offset);
        struct ufs_dir_entry *p = (struct ufs_dir_entry *)(base + (offset & mask));

        while ((char *)p < (char *)de)
                p = ufs_next_entry(sb, p);
        return (char *)p - base;
}
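/*
 * ufs_validate_entry() is used by ufs_readdir() when the directory has
 * changed under a seeking reader (the cached i_version cookie no longer
 * matches): a stale f_pos may point into the middle of an entry, so the
 * offset is rounded down to the start of its chunk and the d_reclen chain
 * is walked forward to the first entry boundary at or beyond the old
 * position.
 */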
/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
        loff_t pos = ctx->pos;
        struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_MASK;
        unsigned long n = pos >> PAGE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
        bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
        unsigned flags = UFS_SB(sb)->s_flags;

        UFSD("BEGIN\n");

        if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
                return 0;

        for ( ; n < npages; n++, offset = 0) {
                struct ufs_dir_entry *de;
                struct folio *folio;
                char *kaddr = ufs_get_folio(inode, n, &folio);
                char *limit;

                if (IS_ERR(kaddr)) {
                        ufs_error(sb, __func__,
                                  "bad page in #%lu",
                                  inode->i_ino);
                        ctx->pos += PAGE_SIZE - offset;
                        return PTR_ERR(kaddr);
                }
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
                                ctx->pos = (n << PAGE_SHIFT) + offset;
                        }
                        *(u64 *)file->private_data = inode_query_iversion(inode);
                        need_revalidate = false;
                }
                de = (struct ufs_dir_entry *)(kaddr + offset);
                limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
                for ( ; (char *)de <= limit; de = ufs_next_entry(sb, de)) {
                        if (de->d_ino) {
                                unsigned char d_type = DT_UNKNOWN;

                                UFSD("filldir(%s,%u)\n", de->d_name,
                                     fs32_to_cpu(sb, de->d_ino));
                                UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

                                if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
                                        d_type = de->d_u.d_44.d_type;

                                if (!dir_emit(ctx, de->d_name,
                                              ufs_get_de_namlen(sb, de),
                                              fs32_to_cpu(sb, de->d_ino),
                                              d_type)) {
                                        folio_release_kmap(folio, de);
                                        return 0;
                                }
                        }
                        ctx->pos += fs16_to_cpu(sb, de->d_reclen);
                }
                folio_release_kmap(folio, kaddr);
        }
        return 0;
}

/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
                     struct folio *folio)
{
        struct super_block *sb = inode->i_sb;
        size_t from, to;
        char *kaddr;
        loff_t pos;
        struct ufs_dir_entry *de, *pde = NULL;
        int err;

        UFSD("ENTER\n");

        from = offset_in_folio(folio, dir);
        to = from + fs16_to_cpu(sb, dir->d_reclen);
        kaddr = (char *)dir - from;
        from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
        de = (struct ufs_dir_entry *)(kaddr + from);

        UFSD("ino %u, reclen %u, namlen %u, name %s\n",
             fs32_to_cpu(sb, de->d_ino),
             fs16_to_cpu(sb, de->d_reclen),
             ufs_get_de_namlen(sb, de), de->d_name);

        while ((char *)de < (char *)dir) {
                if (de->d_reclen == 0) {
                        ufs_error(inode->i_sb, __func__,
                                  "zero-length directory entry");
                        return -EIO;
                }
                pde = de;
                de = ufs_next_entry(sb, de);
        }
        if (pde)
                from = offset_in_folio(folio, pde);
        pos = folio_pos(folio) + from;
        folio_lock(folio);
        err = ufs_prepare_chunk(folio, pos, to - from);
        if (unlikely(err)) {
                folio_unlock(folio);
                return err;
        }
        if (pde)
                pde->d_reclen = cpu_to_fs16(sb, to - from);
        dir->d_ino = 0;
        ufs_commit_chunk(folio, pos, to - from);
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        mark_inode_dirty(inode);
        return ufs_handle_dirsync(inode);
}

int ufs_make_empty(struct inode *inode, struct inode *dir)
{
        struct super_block *sb = dir->i_sb;
        struct address_space *mapping = inode->i_mapping;
        struct folio *folio = filemap_grab_folio(mapping, 0);
        const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
        struct ufs_dir_entry *de;
        int err;
        char *kaddr;

        if (IS_ERR(folio))
                return PTR_ERR(folio);

        err = ufs_prepare_chunk(folio, 0, chunk_size);
        if (err) {
                folio_unlock(folio);
                goto fail;
        }

        kaddr = kmap_local_folio(folio, 0);
        memset(kaddr, 0, folio_size(folio));

        de = (struct ufs_dir_entry *)kaddr;

        de->d_ino = cpu_to_fs32(sb, inode->i_ino);
        ufs_set_de_type(sb, de, inode->i_mode);
        ufs_set_de_namlen(sb, de, 1);
        de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
        strcpy(de->d_name, ".");
        de = (struct ufs_dir_entry *)
                ((char *)de + fs16_to_cpu(sb, de->d_reclen));
        de->d_ino = cpu_to_fs32(sb, dir->i_ino);
        ufs_set_de_type(sb, de, dir->i_mode);
        de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
        ufs_set_de_namlen(sb, de, 2);
        strcpy(de->d_name, "..");
        kunmap_local(kaddr);

        ufs_commit_chunk(folio, 0, chunk_size);
        err = ufs_handle_dirsync(inode);
fail:
        folio_put(folio);
        return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct folio *folio;
        char *kaddr;
        unsigned long i, npages = dir_pages(inode);

        for (i = 0; i < npages; i++) {
                struct ufs_dir_entry *de;

                kaddr = ufs_get_folio(inode, i, &folio);
                if (IS_ERR(kaddr))
                        continue;

                de = (struct ufs_dir_entry *)kaddr;
                kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

                while ((char *)de <= kaddr) {
                        if (de->d_reclen == 0) {
                                ufs_error(inode->i_sb, __func__,
                                          "zero-length directory entry: "
                                          "kaddr=%p, de=%p\n", kaddr, de);
                                goto not_empty;
                        }
                        if (de->d_ino) {
                                u16 namelen = ufs_get_de_namlen(sb, de);
                                /* check for . and .. */
                                if (de->d_name[0] != '.')
                                        goto not_empty;
                                if (namelen > 2)
                                        goto not_empty;
                                if (namelen < 2) {
                                        if (inode->i_ino !=
                                            fs32_to_cpu(sb, de->d_ino))
                                                goto not_empty;
                                } else if (de->d_name[1] != '.')
                                        goto not_empty;
                        }
                        de = ufs_next_entry(sb, de);
                }
                folio_release_kmap(folio, kaddr);
        }
        return 1;

not_empty:
        folio_release_kmap(folio, kaddr);
        return 0;
}
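/*
 * Each open directory keeps a u64 cookie in file->private_data:
 * ufs_dir_open() allocates it, ufs_dir_llseek() keeps it in sync through
 * generic_llseek_cookie(), and ufs_readdir() compares it against the
 * directory's i_version to decide whether a stale f_pos must be
 * revalidated before more entries are emitted.
 */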
static int ufs_dir_open(struct inode *inode, struct file *file)
{
        file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
        if (!file->private_data)
                return -ENOMEM;
        return 0;
}

static int ufs_dir_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static loff_t ufs_dir_llseek(struct file *file, loff_t offset, int whence)
{
        return generic_llseek_cookie(file, offset, whence,
                                     (u64 *)file->private_data);
}

const struct file_operations ufs_dir_operations = {
        .open           = ufs_dir_open,
        .release        = ufs_dir_release,
        .read           = generic_read_dir,
        .iterate_shared = ufs_readdir,
        .fsync          = generic_file_fsync,
        .llseek         = ufs_dir_llseek,
        .setlease       = generic_setlease,
};