// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/iversion.h>

typedef struct ext2_dir_entry_2 ext2_dirent;

/*
 * Tests against MAX_REC_LEN etc were put in place for 64k block
 * sizes; if that is not possible on this arch, we can skip
 * those tests and speed things up.
 */

/*
 * Decode an on-disk little-endian rec_len.  A 64KiB record cannot be
 * represented in 16 bits, so on disk it is stored as the
 * EXT2_MAX_REC_LEN sentinel and translated back to 1 << 16 here.
 */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

/*
 * Encode a rec_len for disk; the inverse of ext2_rec_len_from_disk().
 * A length of exactly 1 << 16 becomes the EXT2_MAX_REC_LEN sentinel;
 * anything larger is a caller bug.
 */
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}

/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
72 */ 73 static unsigned 74 ext2_last_byte(struct inode *inode, unsigned long page_nr) 75 { 76 unsigned last_byte = inode->i_size; 77 78 last_byte -= page_nr << PAGE_SHIFT; 79 if (last_byte > PAGE_SIZE) 80 last_byte = PAGE_SIZE; 81 return last_byte; 82 } 83 84 static void ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) 85 { 86 struct address_space *mapping = page->mapping; 87 struct inode *dir = mapping->host; 88 89 inode_inc_iversion(dir); 90 block_write_end(NULL, mapping, pos, len, len, page, NULL); 91 92 if (pos+len > dir->i_size) { 93 i_size_write(dir, pos+len); 94 mark_inode_dirty(dir); 95 } 96 unlock_page(page); 97 } 98 99 static bool ext2_check_page(struct page *page, int quiet, char *kaddr) 100 { 101 struct inode *dir = page->mapping->host; 102 struct super_block *sb = dir->i_sb; 103 unsigned chunk_size = ext2_chunk_size(dir); 104 u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); 105 unsigned offs, rec_len; 106 unsigned limit = PAGE_SIZE; 107 ext2_dirent *p; 108 char *error; 109 110 if ((dir->i_size >> PAGE_SHIFT) == page->index) { 111 limit = dir->i_size & ~PAGE_MASK; 112 if (limit & (chunk_size - 1)) 113 goto Ebadsize; 114 if (!limit) 115 goto out; 116 } 117 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) { 118 p = (ext2_dirent *)(kaddr + offs); 119 rec_len = ext2_rec_len_from_disk(p->rec_len); 120 121 if (unlikely(rec_len < EXT2_DIR_REC_LEN(1))) 122 goto Eshort; 123 if (unlikely(rec_len & 3)) 124 goto Ealign; 125 if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len))) 126 goto Enamelen; 127 if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))) 128 goto Espan; 129 if (unlikely(le32_to_cpu(p->inode) > max_inumber)) 130 goto Einumber; 131 } 132 if (offs != limit) 133 goto Eend; 134 out: 135 SetPageChecked(page); 136 return true; 137 138 /* Too bad, we had an error */ 139 140 Ebadsize: 141 if (!quiet) 142 ext2_error(sb, __func__, 143 "size of directory #%lu is not a multiple " 144 "of chunk size", 
dir->i_ino); 145 goto fail; 146 Eshort: 147 error = "rec_len is smaller than minimal"; 148 goto bad_entry; 149 Ealign: 150 error = "unaligned directory entry"; 151 goto bad_entry; 152 Enamelen: 153 error = "rec_len is too small for name_len"; 154 goto bad_entry; 155 Espan: 156 error = "directory entry across blocks"; 157 goto bad_entry; 158 Einumber: 159 error = "inode out of bounds"; 160 bad_entry: 161 if (!quiet) 162 ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " 163 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", 164 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, 165 (unsigned long) le32_to_cpu(p->inode), 166 rec_len, p->name_len); 167 goto fail; 168 Eend: 169 if (!quiet) { 170 p = (ext2_dirent *)(kaddr + offs); 171 ext2_error(sb, "ext2_check_page", 172 "entry in directory #%lu spans the page boundary" 173 "offset=%lu, inode=%lu", 174 dir->i_ino, (page->index<<PAGE_SHIFT)+offs, 175 (unsigned long) le32_to_cpu(p->inode)); 176 } 177 fail: 178 SetPageError(page); 179 return false; 180 } 181 182 /* 183 * Calls to ext2_get_page()/ext2_put_page() must be nested according to the 184 * rules documented in kmap_local_page()/kunmap_local(). 185 * 186 * NOTE: ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() 187 * and should be treated as a call to ext2_get_page() for nesting purposes. 188 */ 189 static struct page * ext2_get_page(struct inode *dir, unsigned long n, 190 int quiet, void **page_addr) 191 { 192 struct address_space *mapping = dir->i_mapping; 193 struct folio *folio = read_mapping_folio(mapping, n, NULL); 194 195 if (IS_ERR(folio)) 196 return &folio->page; 197 *page_addr = kmap_local_folio(folio, n & (folio_nr_pages(folio) - 1)); 198 if (unlikely(!folio_test_checked(folio))) { 199 if (!ext2_check_page(&folio->page, quiet, *page_addr)) 200 goto fail; 201 } 202 return &folio->page; 203 204 fail: 205 ext2_put_page(&folio->page, *page_addr); 206 return ERR_PTR(-EIO); 207 } 208 209 /* 210 * NOTE! 
 * unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	/* An entry with inode 0 is an unused slot and never matches. */
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}

/*
 * Walk forward from the start of the chunk containing `offset' to the
 * first entry boundary at or after it.  Used to re-sync a readdir
 * cursor after the directory has changed (iversion mismatch), since
 * entries may have moved and `offset' may point mid-entry.
 */
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;
		p = ext2_next_entry(p);
	}
	return (char *)p - base;
}

/* Record the file type in the entry when the filesystem supports it. */
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = fs_umode_to_ftype(inode->i_mode);
	else
		de->file_type = 0;
}

static int
ext2_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	/* Directory changed since this cursor position was saved? */
	bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
	bool has_filetype;

	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;

	has_filetype =
		EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		ext2_dirent *de;
		struct page *page = ext2_get_page(inode, n, 0,
						  (void **)&kaddr);

		if (IS_ERR(page)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(page);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				/* Snap the cursor to a valid entry boundary. */
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			file->f_version = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page, kaddr);
				return -EIO;
			}
			if (de->inode) {
				unsigned char d_type = DT_UNKNOWN;

				if (has_filetype)
					d_type = fs_ftype_to_dtype(de->file_type);

				if (!dir_emit(ctx, de->name, de->name_len,
						le32_to_cpu(de->inode),
						d_type)) {
					ext2_put_page(page, kaddr);
					return 0;
				}
			}
			ctx->pos += ext2_rec_len_from_disk(de->rec_len);
		}
		ext2_put_page(page, kaddr);
	}
	return 0;
}

/*
 * ext2_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 *
 * On Success ext2_put_page() should be called on *res_page.
 *
 * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
 * the rules documented in kmap_local_page()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
 * should be treated as a call to ext2_get_page() for nesting purposes.
 */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
			const struct qstr *child, struct page **res_page,
			void **res_page_addr)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;
	void *page_addr;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;
	*res_page_addr = NULL;

	/* Start at the page where the previous lookup succeeded. */
	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ext2_get_page(dir, n, 0, &page_addr);
		if (IS_ERR(page))
			return ERR_CAST(page);

		kaddr = page_addr;
		de = (ext2_dirent *) kaddr;
		kaddr += ext2_last_byte(dir, n) - reclen;
		while ((char *) de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page, page_addr);
				goto out;
			}
			if (ext2_match(namelen, name, de))
				goto found;
			de = ext2_next_entry(de);
		}
		ext2_put_page(page, page_addr);

		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return ERR_PTR(-ENOENT);

found:
	*res_page = page;
	*res_page_addr = page_addr;
	/* Cache the page index for the next lookup in this directory. */
	ei->i_dir_start_lookup = n;
	return de;
}

/*
 * Return the '..' directory entry and the page in which the entry was found
 * (as a parameter - p).
 *
 * On Success ext2_put_page() should be called on *p.
 *
 * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
 * the rules documented in kmap_local_page()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
 * should be treated as a call to ext2_get_page() for nesting purposes.
 */
struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p,
				     void **pa)
{
	void *page_addr;
	struct page *page = ext2_get_page(dir, 0, 0, &page_addr);
	ext2_dirent *de = NULL;

	if (!IS_ERR(page)) {
		/* '..' is always the second entry of the first chunk. */
		de = ext2_next_entry((ext2_dirent *) page_addr);
		*p = page;
		*pa = page_addr;
	}
	return de;
}

/*
 * Look up `child' in `dir' and return its inode number in *ino.
 * Returns 0 on success or a negative errno from the lookup.
 */
int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
{
	struct ext2_dir_entry_2 *de;
	struct page *page;
	void *page_addr;

	de = ext2_find_entry(dir, child, &page, &page_addr);
	if (IS_ERR(de))
		return PTR_ERR(de);

	*ino = le32_to_cpu(de->inode);
	ext2_put_page(page, page_addr);
	return 0;
}

static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ext2_get_block);
}

/* Flush directory data and metadata, for DIRSYNC/sync semantics. */
static int ext2_handle_dirsync(struct inode *dir)
{
	int err;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}

/*
 * Point an existing directory entry `de' at `inode', updating the
 * entry's file type.  Used by rename.  `update_times' says whether to
 * bump the directory's mtime/ctime.
 */
int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		  struct page *page, void *page_addr, struct inode *inode,
		  bool update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_addr;
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	lock_page(page);
	err = ext2_prepare_chunk(page, pos, len);
	if (err) {
		unlock_page(page);
		return err;
	}
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	ext2_commit_chunk(page, pos, len);
	if (update_times)
		dir->i_mtime = dir->i_ctime = current_time(dir);
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	return ext2_handle_dirsync(dir);
}

/*
 * Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	void *page_addr = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr;
		char *dir_end;

		page = ext2_get_page(dir, n, 0, &page_addr);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_addr;
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			/* Free (unused) entry big enough for our name? */
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			/* Live entry with enough slack after its name? */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ext2_put_page(page, page_addr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
		(char *)de - (char *)page_addr;
	err = ext2_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->inode) {
		/*
		 * Split the live entry: it keeps its name, the new entry
		 * takes the slack space after it.
		 */
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	ext2_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	err = ext2_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	ext2_put_page(page, page_addr);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date.
 */
int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
			char *kaddr)
{
	struct inode *inode = page->mapping->host;
	/*
	 * [from, to) is the byte range in the page that gets rewritten:
	 * from the start of the containing chunk up to the end of the
	 * victim entry.
	 */
	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
	unsigned to = ((char *)dir - kaddr) +
				ext2_rec_len_from_disk(dir->rec_len);
	loff_t pos;
	ext2_dirent * pde = NULL;
	ext2_dirent * de = (ext2_dirent *) (kaddr + from);
	int err;

	/* Find the entry immediately preceding the one to delete. */
	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = (char *)pde - kaddr;
	pos = page_offset(page) + from;
	lock_page(page);
	err = ext2_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	/* Grow the predecessor's rec_len over the deleted entry. */
	if (pde)
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	ext2_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
	err = ext2_handle_dirsync(inode);
out:
	return err;
}

/*
 * Set the first fragment of directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = ext2_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_local_page(page);
	memset(kaddr, 0, chunk_size);
	/* "." entry, pointing at this directory itself. */
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);

	/* ".." entry, pointing at the parent; takes the rest of the chunk. */
	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_local(kaddr);
	ext2_commit_chunk(page, 0, chunk_size);
	err = ext2_handle_dirsync(inode);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir (struct inode * inode)
{
	void *page_addr = NULL;
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		ext2_dirent * de;
		page = ext2_get_page(inode, i, 0, &page_addr);

		if (IS_ERR(page))
			return 0;

		kaddr = page_addr;
		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and ..
 */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					/* "." must point at this directory */
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		ext2_put_page(page, page_addr);
	}
	return 1;

not_empty:
	ext2_put_page(page, page_addr);
	return 0;
}

const struct file_operations ext2_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
};