// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/filelock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/iversion.h>

typedef struct ext2_dir_entry_2 ext2_dirent;

/*
 * Tests against MAX_REC_LEN etc were put in place for 64k block
 * sizes; if that is not possible on this arch, we can skip
 * those tests and speed things up.
 */
/*
 * Decode an on-disk little-endian record length.  A 16-bit field cannot
 * hold the value 65536, so with 64k blocks the special EXT2_MAX_REC_LEN
 * encoding stands in for a record covering a whole 64k block.
 */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

/*
 * Encode a record length for disk: the inverse of
 * ext2_rec_len_from_disk().  Lengths above 64k are a caller bug.
 */
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}

/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
73 */ 74 static unsigned 75 ext2_last_byte(struct inode *inode, unsigned long page_nr) 76 { 77 unsigned last_byte = inode->i_size; 78 79 last_byte -= page_nr << PAGE_SHIFT; 80 if (last_byte > PAGE_SIZE) 81 last_byte = PAGE_SIZE; 82 return last_byte; 83 } 84 85 static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len) 86 { 87 struct address_space *mapping = folio->mapping; 88 struct inode *dir = mapping->host; 89 90 inode_inc_iversion(dir); 91 block_write_end(pos, len, len, folio); 92 93 if (pos+len > dir->i_size) { 94 i_size_write(dir, pos+len); 95 mark_inode_dirty(dir); 96 } 97 folio_unlock(folio); 98 } 99 100 static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr) 101 { 102 struct inode *dir = folio->mapping->host; 103 struct super_block *sb = dir->i_sb; 104 unsigned chunk_size = ext2_chunk_size(dir); 105 u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); 106 unsigned offs, rec_len; 107 unsigned limit = folio_size(folio); 108 ext2_dirent *p; 109 char *error; 110 111 if (dir->i_size < folio_pos(folio) + limit) { 112 limit = offset_in_folio(folio, dir->i_size); 113 if (limit & (chunk_size - 1)) 114 goto Ebadsize; 115 if (!limit) 116 goto out; 117 } 118 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) { 119 p = (ext2_dirent *)(kaddr + offs); 120 rec_len = ext2_rec_len_from_disk(p->rec_len); 121 122 if (unlikely(rec_len < EXT2_DIR_REC_LEN(1))) 123 goto Eshort; 124 if (unlikely(rec_len & 3)) 125 goto Ealign; 126 if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len))) 127 goto Enamelen; 128 if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))) 129 goto Espan; 130 if (unlikely(le32_to_cpu(p->inode) > max_inumber)) 131 goto Einumber; 132 } 133 if (offs != limit) 134 goto Eend; 135 out: 136 folio_set_checked(folio); 137 return true; 138 139 /* Too bad, we had an error */ 140 141 Ebadsize: 142 if (!quiet) 143 ext2_error(sb, __func__, 144 "size of directory #%lu is not a multiple " 145 "of chunk 
size", dir->i_ino); 146 goto fail; 147 Eshort: 148 error = "rec_len is smaller than minimal"; 149 goto bad_entry; 150 Ealign: 151 error = "unaligned directory entry"; 152 goto bad_entry; 153 Enamelen: 154 error = "rec_len is too small for name_len"; 155 goto bad_entry; 156 Espan: 157 error = "directory entry across blocks"; 158 goto bad_entry; 159 Einumber: 160 error = "inode out of bounds"; 161 bad_entry: 162 if (!quiet) 163 ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " 164 "offset=%llu, inode=%lu, rec_len=%d, name_len=%d", 165 dir->i_ino, error, folio_pos(folio) + offs, 166 (unsigned long) le32_to_cpu(p->inode), 167 rec_len, p->name_len); 168 goto fail; 169 Eend: 170 if (!quiet) { 171 p = (ext2_dirent *)(kaddr + offs); 172 ext2_error(sb, "ext2_check_folio", 173 "entry in directory #%lu spans the page boundary" 174 "offset=%llu, inode=%lu", 175 dir->i_ino, folio_pos(folio) + offs, 176 (unsigned long) le32_to_cpu(p->inode)); 177 } 178 fail: 179 return false; 180 } 181 182 /* 183 * Calls to ext2_get_folio()/folio_release_kmap() must be nested according 184 * to the rules documented in kmap_local_folio()/kunmap_local(). 185 * 186 * NOTE: ext2_find_entry() and ext2_dotdot() act as a call 187 * to folio_release_kmap() and should be treated as a call to 188 * folio_release_kmap() for nesting purposes. 189 */ 190 static void *ext2_get_folio(struct inode *dir, unsigned long n, 191 int quiet, struct folio **foliop) 192 { 193 struct address_space *mapping = dir->i_mapping; 194 struct folio *folio = read_mapping_folio(mapping, n, NULL); 195 void *kaddr; 196 197 if (IS_ERR(folio)) 198 return ERR_CAST(folio); 199 kaddr = kmap_local_folio(folio, 0); 200 if (unlikely(!folio_test_checked(folio))) { 201 if (!ext2_check_folio(folio, quiet, kaddr)) 202 goto fail; 203 } 204 *foliop = folio; 205 return kaddr; 206 207 fail: 208 folio_release_kmap(folio, kaddr); 209 return ERR_PTR(-EIO); 210 } 211 212 /* 213 * NOTE! 
 * unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;	/* entry is a deleted/unused slot */
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}

/*
 * Re-derive a valid entry offset after the directory changed under a
 * readdir cookie: walk forward from the start of the chunk containing
 * @offset until reaching or passing the remembered position, and return
 * the offset of that (now entry-aligned) position within the page.
 */
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;	/* corrupt entry; stop rather than loop forever */
		p = ext2_next_entry(p);
	}
	return offset_in_page(p);
}

/*
 * Set the on-disk file-type byte from the inode mode, but only when the
 * filesystem has the FILETYPE incompat feature; otherwise it stays 0.
 */
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = fs_umode_to_ftype(inode->i_mode);
	else
		de->file_type = 0;
}

/*
 * Iterate over the directory for getdents().  file->private_data holds the
 * iversion seen by this open file (see ext2_dir_open()); when it no longer
 * matches the inode's iversion the cookie in ctx->pos may point inside a
 * stale entry and is revalidated via ext2_validate_entry().
 */
static int
ext2_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
	bool has_filetype;

	/* No room left for even a minimal entry past this position. */
	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;

	has_filetype =
		EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);

	for ( ; n < npages; n++, offset = 0) {
		ext2_dirent *de;
		struct folio *folio;
		char *kaddr = ext2_get_folio(inode, n, 0, &folio);
		char *limit;

		if (IS_ERR(kaddr)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			/* skip the bad page so a retry makes progress */
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(kaddr);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			*(u64 *)file->private_data = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				folio_release_kmap(folio, de);
				return -EIO;
			}
			if (de->inode) {
				unsigned char d_type = DT_UNKNOWN;

				if (has_filetype)
					d_type = fs_ftype_to_dtype(de->file_type);

				if (!dir_emit(ctx, de->name, de->name_len,
						le32_to_cpu(de->inode),
						d_type)) {
					folio_release_kmap(folio, de);
					return 0;
				}
			}
			ctx->pos += ext2_rec_len_from_disk(de->rec_len);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}

/*
 * ext2_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 *
 * On Success folio_release_kmap() should be called on *foliop.
 *
 * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
 * according to the rules documented in kmap_local_folio()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
 * and should be treated as a call to ext2_get_folio() for nesting
 * purposes.
 */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
			const struct qstr *child, struct folio **foliop)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;

	if (npages == 0)
		goto out;

	/* start at the last successful lookup's folio, wrapping around */
	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = ext2_get_folio(dir, n, 0, foliop);
		if (IS_ERR(kaddr))
			return ERR_CAST(kaddr);

		de = (ext2_dirent *) kaddr;
		/* kaddr now marks the last position a match could start at */
		kaddr += ext2_last_byte(dir, n) - reclen;
		while ((char *) de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				folio_release_kmap(*foliop, de);
				goto out;
			}
			if (ext2_match(namelen, name, de))
				goto found;
			de = ext2_next_entry(de);
		}
		folio_release_kmap(*foliop, kaddr);

		if (++n >= npages)
			n = 0;
		/* next folio is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return ERR_PTR(-ENOENT);

found:
	/* remember where we found it to speed up the next lookup */
	ei->i_dir_start_lookup = n;
	return de;
}

/*
 * Return the '..' directory entry and the page in which the entry was found
 * (as a parameter - p).
 *
 * On Success folio_release_kmap() should be called on *foliop.
 *
 * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
 * according to the rules documented in kmap_local_folio()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
 * and should be treated as a call to ext2_get_folio() for nesting
 * purposes.
 */
struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop)
{
	/* '.' is the first entry of the first folio; '..' follows it. */
	ext2_dirent *de = ext2_get_folio(dir, 0, 0, foliop);

	if (!IS_ERR(de))
		return ext2_next_entry(de);
	return NULL;
}

/*
 * Look up @child in @dir and store its inode number in *ino.
 * Returns 0 on success or the error from ext2_find_entry().
 */
int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
{
	struct ext2_dir_entry_2 *de;
	struct folio *folio;

	de = ext2_find_entry(dir, child, &folio);
	if (IS_ERR(de))
		return PTR_ERR(de);

	*ino = le32_to_cpu(de->inode);
	folio_release_kmap(folio, de);
	return 0;
}

/* Prepare @len bytes at @pos of a locked folio for writing. */
static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, ext2_get_block);
}

/*
 * Flush a directory's data and metadata to disk, for DIRSYNC-style
 * callers that need the update durable before returning.
 */
static int ext2_handle_dirsync(struct inode *dir)
{
	int err;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}

/*
 * Point an existing directory entry @de at @inode (used e.g. by rename),
 * updating the file-type byte to match.  @folio is the mapped folio
 * containing @de.  When @update_times is set the directory's ctime/mtime
 * are refreshed as well.
 */
int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		struct folio *folio, struct inode *inode, bool update_times)
{
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	folio_lock(folio);
	err = ext2_prepare_chunk(folio, pos, len);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	/* commit unlocks the folio */
	ext2_commit_chunk(folio, pos, len);
	if (update_times)
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	return ext2_handle_dirsync(dir);
}

/*
 * Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct folio *folio = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr = ext2_get_folio(dir, n, 0, &folio);
		char *dir_end;

		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		/* kaddr now marks the last position a new entry could start at */
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			/* name_len = space the existing entry actually needs */
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			/* an unused slot that is big enough? */
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			/* a live entry with enough slack to split off? */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	/* The n == npages pass appends a fresh chunk, so we can't get here. */
	BUG();
	return -EINVAL;

got_it:
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	err = ext2_prepare_chunk(folio, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->inode) {
		/* split the slack off the live entry into a new slot de1 */
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	/* commit unlocks the folio */
	ext2_commit_chunk(folio, pos, rec_len);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	err = ext2_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}

/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date.
 */
int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t from, to;
	char *kaddr;
	loff_t pos;
	ext2_dirent *de, *pde = NULL;
	int err;

	from = offset_in_folio(folio, dir);
	to = from + ext2_rec_len_from_disk(dir->rec_len);
	kaddr = (char *)dir - from;
	/* round down to the start of the chunk holding the entry */
	from &= ~(ext2_chunk_size(inode)-1);
	de = (ext2_dirent *)(kaddr + from);

	/* walk the chunk to find the entry preceding the victim, if any */
	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			return -EIO;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = offset_in_folio(folio, pde);
	pos = folio_pos(folio) + from;
	folio_lock(folio);
	err = ext2_prepare_chunk(folio, pos, to - from);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	if (pde)
		/* absorb the victim's space into the previous entry */
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	/* commit unlocks the folio */
	ext2_commit_chunk(folio, pos, to - from);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
	return ext2_handle_dirsync(inode);
}

/*
 * Set the first fragment of directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = ext2_prepare_chunk(folio, 0, chunk_size);
	if (err) {
		folio_unlock(folio);
		goto fail;
	}
	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, chunk_size);
	/* "." entry pointing at this directory itself */
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);

	/* ".." entry pointing at the parent, taking the rest of the chunk */
	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_local(kaddr);
	/* commit unlocks the folio */
	ext2_commit_chunk(folio, 0, chunk_size);
	err = ext2_handle_dirsync(inode);
fail:
	folio_put(folio);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir(struct inode *inode)
{
	struct folio *folio;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		ext2_dirent *de;

		kaddr = ext2_get_folio(inode, i, 0, &folio);
		if (IS_ERR(kaddr))
			/* treat an unreadable folio as "not empty" */
			return 0;

		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					/* "." must point at this directory */
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}

/*
 * Allocate the per-open iversion cookie used by ext2_readdir() and
 * ext2_dir_llseek() to detect concurrent directory modification.
 */
static int ext2_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
	if (!file->private_data)
		return -ENOMEM;
	return 0;
}

static int ext2_dir_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static loff_t ext2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_llseek_cookie(file, offset, whence,
				     (u64 *)file->private_data);
}

const struct file_operations ext2_dir_operations = {
	.open		= ext2_dir_open,
	.release	= ext2_dir_release,
	.llseek		= ext2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
	.setlease	= generic_setlease,
};