// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/ufs_dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" in May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const unsigned char *name, struct ufs_dir_entry *de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	folio_unlock(folio);
}

static int ufs_handle_dirsync(struct inode *dir)
{
	int err;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}

ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct folio *folio;

	de = ufs_find_entry(dir, qstr, &folio);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		folio_release_kmap(folio, de);
	}
	return res;
}


int ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		 struct folio *folio, struct inode *inode,
		 bool update_times)
{
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, len);
	if (unlikely(err)) {
		folio_unlock(folio);
		return err;
	}

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	ufs_commit_chunk(folio, pos, len);
	if (update_times)
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	return ufs_handle_dirsync(dir);
}

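/*
 * Validate all directory entries in a freshly read folio before any of
 * them are used; on success the folio is marked checked so this runs
 * only once per folio.  In short (the labels below carry the exact
 * messages): each d_reclen must be at least UFS_DIR_REC_LEN(1), 4-byte
 * aligned, large enough for the entry's name and must not let the entry
 * cross an s_dirblksize chunk boundary; d_ino must not exceed the
 * number of inodes in the filesystem; and the entries must exactly tile
 * the valid part of the folio.
 */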
static bool ufs_check_folio(struct folio *folio, char *kaddr)
{
	struct inode *dir = folio->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned offs, rec_len;
	unsigned limit = folio_size(folio);
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if (dir->i_size < folio_pos(folio) + limit) {
		limit = offset_in_folio(folio, dir->i_size);
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						 UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	folio_set_checked(folio);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, __func__,
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
		  "offset=%llu, rec_len=%d, name_len=%d",
		  dir->i_ino, error, folio_pos(folio) + offs,
		  rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		  "entry in directory #%lu spans the page boundary, "
		  "offset=%llu",
		  dir->i_ino, folio_pos(folio) + offs);
fail:
	return false;
}

static void *ufs_get_folio(struct inode *dir, unsigned long n,
			   struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct folio *folio = read_mapping_folio(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	kaddr = kmap_local_folio(folio, 0);
	if (unlikely(!folio_test_checked(folio))) {
		if (!ufs_check_folio(folio, kaddr))
			goto fail;
	}
	*foliop = folio;
	return kaddr;

fail:
	folio_release_kmap(folio, kaddr);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

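/*
 * Return the ".." entry of @dir and map the folio that holds it into
 * *foliop; the caller is expected to drop it with folio_release_kmap().
 * The entry is taken to be the second one in the first directory chunk,
 * i.e. the layout ufs_make_empty() creates.  Returns NULL if the first
 * folio cannot be read or fails validation.
 */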
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop)
{
	struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop);

	if (!IS_ERR(de))
		return ufs_next_entry(dir->i_sb, de);

	return NULL;
}

/*
 *	ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the entry itself, and the folio in which it was found (via
 * *foliop). The folio is returned mapped and unlocked. The entry is
 * guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct folio **foliop)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = ufs_get_folio(dir, n, foliop);

		if (!IS_ERR(kaddr)) {
			de = (struct ufs_dir_entry *)kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			folio_release_kmap(*foliop, kaddr);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	ui->i_dir_start_lookup = n;
	return de;
}

/*
 * Parent is locked.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct folio *folio = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
	 */
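	/*
	 * Each pass looks for either an unused entry whose record is big
	 * enough for the new name, a live entry with enough slack after
	 * its own name to be split in two (see got_it), or, once we step
	 * past i_size, a fresh chunk that extends the directory.
	 */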
	for (n = 0; n <= npages; n++) {
		char *kaddr = ufs_get_folio(dir, n, &folio);
		char *dir_end;

		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	err = ufs_prepare_chunk(folio, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	ufs_commit_chunk(folio, pos, rec_len);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	mark_inode_dirty(dir);
	err = ufs_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}

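/*
 * The directory may have been modified since ctx->pos was set; this is
 * detected in ufs_readdir() when the inode's i_version no longer matches
 * the snapshot kept in file->private_data.  In that case the saved offset
 * may point into the middle of an entry, so walk forward from the start
 * of the containing s_dirblksize chunk and return the offset of the first
 * entry boundary at or beyond it.
 */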
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
	while ((char*)p < (char*)de)
		p = ufs_next_entry(sb, p);
	return (char *)p - base;
}


/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		struct ufs_dir_entry *de;
		struct folio *folio;
		char *kaddr = ufs_get_folio(inode, n, &folio);
		char *limit;

		if (IS_ERR(kaddr)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(kaddr);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			*(u64 *)file->private_data = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_ino) {
				unsigned char d_type = DT_UNKNOWN;

				UFSD("filldir(%s,%u)\n", de->d_name,
				     fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				if (!dir_emit(ctx, de->d_name,
					      ufs_get_de_namlen(sb, de),
					      fs32_to_cpu(sb, de->d_ino),
					      d_type)) {
					folio_release_kmap(folio, de);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}


/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct folio *folio)
{
	struct super_block *sb = inode->i_sb;
	size_t from, to;
	char *kaddr;
	loff_t pos;
	struct ufs_dir_entry *de, *pde = NULL;
	int err;

	UFSD("ENTER\n");

	from = offset_in_folio(folio, dir);
	to = from + fs16_to_cpu(sb, dir->d_reclen);
	kaddr = (char *)dir - from;
	from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	de = (struct ufs_dir_entry *) (kaddr + from);

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	     fs32_to_cpu(sb, de->d_ino),
	     fs16_to_cpu(sb, de->d_reclen),
	     ufs_get_de_namlen(sb, de), de->d_name);

	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			return -EIO;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = offset_in_folio(folio, pde);
	pos = folio_pos(folio) + from;
	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, to - from);
	if (unlikely(err)) {
		folio_unlock(folio);
		return err;
	}
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	ufs_commit_chunk(folio, pos, to - from);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	return ufs_handle_dirsync(inode);
}

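/*
 * Write the initial "." and ".." entries into the first s_dirblksize
 * chunk of a freshly created directory: "." gets UFS_DIR_REC_LEN(1)
 * bytes and ".." takes the remainder of the chunk via its d_reclen.
 */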
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = filemap_grab_folio(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	int err;
	char *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = ufs_prepare_chunk(folio, 0, chunk_size);
	if (err) {
		folio_unlock(folio);
		goto fail;
	}

	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, folio_size(folio));

	de = (struct ufs_dir_entry *)kaddr;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap_local(kaddr);

	ufs_commit_chunk(folio, 0, chunk_size);
	err = ufs_handle_dirsync(inode);
fail:
	folio_put(folio);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct folio *folio;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		struct ufs_dir_entry *de;

		kaddr = ufs_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr))
			continue;

		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					  "zero-length directory entry: "
					  "kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen=ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}

static int ufs_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
	if (!file->private_data)
		return -ENOMEM;
	return 0;
}

static int ufs_dir_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static loff_t ufs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_llseek_cookie(file, offset, whence,
				     (u64 *)file->private_data);
}

const struct file_operations ufs_dir_operations = {
	.open		= ufs_dir_open,
	.release	= ufs_dir_release,
	.read		= generic_read_dir,
	.iterate_shared	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= ufs_dir_llseek,
};