/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Implements Extendible Hashing as described in:
 *   "Extendible Hashing" by Fagin, et al in
 *     __ACM Trans. on Database Systems__, Sept 1979.
 *
 *
 * Here's the layout of dirents which is essentially the same as that of ext2
 * within a single block. The field de_name_len is the number of bytes
 * actually required for the name (no null terminator). The field de_rec_len
 * is the number of bytes allocated to the dirent. The offset of the next
 * dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
 * deleted, the preceding dirent inherits its allocated space, ie
 * prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
 * by adding de_rec_len to the current dirent, this essentially causes the
 * deleted dirent to get jumped over when iterating through all the dirents.
 *
 * When deleting the first dirent in a block, there is no previous dirent so
 * the field de_ino is set to zero to designate it as deleted. When allocating
 * a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
 * first dirent has (de_ino == 0) and de_rec_len is large enough, this first
 * dirent is allocated. Otherwise it must go through all the 'used' dirents
 * searching for one in which the amount of total space minus the amount of
 * used space will provide enough space for the new dirent.
 *
 * There are two types of blocks in which dirents reside. In a stuffed dinode,
 * the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of
 * the block.  In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
 * beginning of the leaf block. The dirents reside in leaves when
 *
 * dip->i_diskflags & GFS2_DIF_EXHASH is true
 *
 * Otherwise, the dirents are "linear", within a single stuffed dinode block.
 *
 * When the dirents are in leaves, the actual contents of the directory file are
 * used as an array of 64-bit block pointers pointing to the leaf blocks. The
 * dirents are NOT in the directory file itself. There can be more than one
 * block pointer in the array that points to the same leaf. In fact, when a
 * directory is first converted from linear to exhash, all of the pointers
 * point to the same leaf.
 *
 * When a leaf is completely full, the size of the hash table can be
 * doubled unless it is already at the maximum size which is hard coded into
 * GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list,
 * but never before the maximum hash table size has been reached.
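 *
 * For example (the byte counts here are only illustrative): if dirent A
 * has de_rec_len == 48 and is immediately followed by dirent B with
 * de_rec_len == 56, deleting B leaves A with de_rec_len == 104, so the
 * walk (dirent + dirent->de_rec_len) steps straight from A to whatever
 * followed B, and the extra 56 bytes become free space that a later
 * allocation within the block can reuse.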
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "bmap.h"
#include "util.h"

#define IS_LEAF     1 /* Hashed (leaf) directory */
#define IS_DINODE   2 /* Linear (stuffed dinode block) directory */

#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */

#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
#define GFS2_HASH_INDEX_MASK 0xffffc000
#define GFS2_USE_HASH_FLAG 0x2000

struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;

typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
			    const struct qstr *name, void *opaque);

int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
			    struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	*bhp = bh;
	return 0;
}

static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
					struct buffer_head **bhp)
{
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);
	if (error)
		return error;
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
		brelse(bh);
		return -EIO;
	}
	*bhp = bh;
	return 0;
}

static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
				  unsigned int offset, unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
	if (ip->i_inode.i_size < offset + size)
		i_size_write(&ip->i_inode, offset + size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(ip, dibh->b_data);

	brelse(dibh);

	return size;
}



/**
 * gfs2_dir_write_data - Write directory information to the inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing information to be written
 * @offset: The file offset to start writing at
 * @size: The amount of data to write
 *
 * Returns: The number of bytes correctly written or error code
 */
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
			       u64 offset, unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;
	int new = 0;

	if (!size)
		return 0;

	if (gfs2_is_stuffed(ip) &&
	    offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
		return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
					      size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			return error;
	}

	lblock = offset;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		if (!extlen) {
			new = 1;
			error = gfs2_extent_map(&ip->i_inode, lblock, &new,
						&dblock, &extlen);
			if (error)
				goto fail;
			error = -EIO;
			if (gfs2_assert_withdraw(sdp, dblock))
				goto fail;
		}

		if (amount == sdp->sd_jbsize || new)
			error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
		else
			error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

		if (error)
			goto fail;

		gfs2_trans_add_meta(ip->i_gl, bh);
		memcpy(bh->b_data + o, buf, amount);
		brelse(bh);

		buf += amount;
		copied += amount;
		lblock++;
		dblock++;
		extlen--;

		o = sizeof(struct gfs2_meta_header);
	}

out:
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (ip->i_inode.i_size < offset + copied)
		i_size_write(&ip->i_inode, offset + copied);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

	return copied;
fail:
	if (copied)
		goto out;
	return error;
}

static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
				 unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
		brelse(dibh);
	}

	return (error) ? error : size;
}


/**
 * gfs2_dir_read_data - Read data from a directory inode
 * @ip: The GFS2 Inode
 * @buf: The buffer to place result into
 * @size: Amount of data to transfer
 *
 * Returns: The amount of data actually copied or the error
 */
static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
			      unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;

	if (gfs2_is_stuffed(ip))
		return gfs2_dir_read_stuffed(ip, buf, size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	lblock = 0;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;
		int new;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		if (!extlen) {
			new = 0;
			error = gfs2_extent_map(&ip->i_inode, lblock, &new,
						&dblock, &extlen);
			if (error || !dblock)
				goto fail;
			BUG_ON(extlen < 1);
			bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		} else {
			error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh);
			if (error)
				goto fail;
		}
		error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
		if (error) {
			brelse(bh);
			goto fail;
		}
		dblock++;
		extlen--;
		memcpy(buf, bh->b_data + o, amount);
		brelse(bh);
		buf += (amount/sizeof(__be64));
		copied += amount;
		lblock++;
		o = sizeof(struct gfs2_meta_header);
	}

	return copied;
fail:
	return (copied) ? copied : error;
}

/**
 * gfs2_dir_get_hash_table - Get pointer to the dir hash table
 * @ip: The inode in question
 *
 * Returns: The hash table or an error
 */

static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	int ret;
	u32 hsize;
	__be64 *hc;

	BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));

	hc = ip->i_hash_cache;
	if (hc)
		return hc;

	hsize = 1 << ip->i_depth;
	hsize *= sizeof(__be64);
	if (hsize != i_size_read(&ip->i_inode)) {
		gfs2_consist_inode(ip);
		return ERR_PTR(-EIO);
	}

	hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
	if (hc == NULL)
		hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);

	if (hc == NULL)
		return ERR_PTR(-ENOMEM);

	ret = gfs2_dir_read_data(ip, hc, hsize);
	if (ret < 0) {
		kvfree(hc);
		return ERR_PTR(ret);
	}

	spin_lock(&inode->i_lock);
	if (likely(!ip->i_hash_cache)) {
		ip->i_hash_cache = hc;
		hc = NULL;
	}
	spin_unlock(&inode->i_lock);
	kvfree(hc);

	return ip->i_hash_cache;
}

/**
 * gfs2_dir_hash_inval - Invalidate dir hash
 * @ip: The directory inode
 *
 * Must be called with an exclusive glock, or during glock invalidation.
 */
void gfs2_dir_hash_inval(struct gfs2_inode *ip)
{
	__be64 *hc;

	spin_lock(&ip->i_inode.i_lock);
	hc = ip->i_hash_cache;
	ip->i_hash_cache = NULL;
	spin_unlock(&ip->i_inode.i_lock);

	kvfree(hc);
}

static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
	return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
}

static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
				     const struct qstr *name, int ret)
{
	if (!gfs2_dirent_sentinel(dent) &&
	    be32_to_cpu(dent->de_hash) == name->hash &&
	    be16_to_cpu(dent->de_name_len) == name->len &&
	    memcmp(dent+1, name->name, name->len) == 0)
		return ret;
	return 0;
}

static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 1);
}

static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 2);
}

/*
 * name->name holds ptr to start of block.
 * name->len holds size of block.
 */
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	const char *start = name->name;
	const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
	if (name->len == (end - start))
		return 1;
	return 0;
}

/* Look for the dirent that contains the offset specified in data. Once we
 * find that dirent, there must be space available there for the new dirent */
static int gfs2_dirent_find_offset(const struct gfs2_dirent *dent,
				   const struct qstr *name,
				   void *ptr)
{
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (ptr < (void *)dent || ptr >= (void *)dent + totlen)
		return 0;
	if (gfs2_dirent_sentinel(dent))
		actual = 0;
	if (ptr < (void *)dent + actual)
		return -1;
	if ((void *)dent + totlen >= ptr + required)
		return 1;
	return -1;
}

static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
				  const struct qstr *name,
				  void *opaque)
{
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (gfs2_dirent_sentinel(dent))
		actual = 0;
	if (totlen - actual >= required)
		return 1;
	return 0;
}

struct dirent_gather {
	const struct gfs2_dirent **pdent;
	unsigned offset;
};

static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
			      const struct qstr *name,
			      void *opaque)
{
	struct dirent_gather *g = opaque;
	if (!gfs2_dirent_sentinel(dent)) {
		g->pdent[g->offset++] = dent;
	}
	return 0;
}

/*
 * Other possible things to check:
 * - Inode located within filesystem size (and on valid block)
 * - Valid directory entry type
 * Not sure how heavy-weight we want to make this... could also check
 * hash is correct for example, but that would take a lot of extra time.
 * For now the most important thing is to check that the various sizes
 * are correct.
 */
static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
			     unsigned int size, unsigned int len, int first)
{
	const char *msg = "gfs2_dirent too small";
	if (unlikely(size < sizeof(struct gfs2_dirent)))
		goto error;
	msg = "gfs2_dirent misaligned";
	if (unlikely(offset & 0x7))
		goto error;
	msg = "gfs2_dirent points beyond end of block";
	if (unlikely(offset + size > len))
		goto error;
	msg = "zero inode number";
	if (unlikely(!first && gfs2_dirent_sentinel(dent)))
		goto error;
	msg = "name length is greater than space in dirent";
	if (!gfs2_dirent_sentinel(dent) &&
	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
		     size))
		goto error;
	return 0;
error:
	pr_warn("%s: %s (%s)\n",
		__func__, msg, first ? "first in block" : "not first in block");
	return -EIO;
"first in block" : "not first in block"); 533 return -EIO; 534 } 535 536 static int gfs2_dirent_offset(const void *buf) 537 { 538 const struct gfs2_meta_header *h = buf; 539 int offset; 540 541 BUG_ON(buf == NULL); 542 543 switch(be32_to_cpu(h->mh_type)) { 544 case GFS2_METATYPE_LF: 545 offset = sizeof(struct gfs2_leaf); 546 break; 547 case GFS2_METATYPE_DI: 548 offset = sizeof(struct gfs2_dinode); 549 break; 550 default: 551 goto wrong_type; 552 } 553 return offset; 554 wrong_type: 555 pr_warn("%s: wrong block type %u\n", __func__, be32_to_cpu(h->mh_type)); 556 return -1; 557 } 558 559 static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, 560 unsigned int len, gfs2_dscan_t scan, 561 const struct qstr *name, 562 void *opaque) 563 { 564 struct gfs2_dirent *dent, *prev; 565 unsigned offset; 566 unsigned size; 567 int ret = 0; 568 569 ret = gfs2_dirent_offset(buf); 570 if (ret < 0) 571 goto consist_inode; 572 573 offset = ret; 574 prev = NULL; 575 dent = buf + offset; 576 size = be16_to_cpu(dent->de_rec_len); 577 if (gfs2_check_dirent(dent, offset, size, len, 1)) 578 goto consist_inode; 579 do { 580 ret = scan(dent, name, opaque); 581 if (ret) 582 break; 583 offset += size; 584 if (offset == len) 585 break; 586 prev = dent; 587 dent = buf + offset; 588 size = be16_to_cpu(dent->de_rec_len); 589 if (gfs2_check_dirent(dent, offset, size, len, 0)) 590 goto consist_inode; 591 } while(1); 592 593 switch(ret) { 594 case 0: 595 return NULL; 596 case 1: 597 return dent; 598 case 2: 599 return prev ? prev : dent; 600 default: 601 BUG_ON(ret > 0); 602 return ERR_PTR(ret); 603 } 604 605 consist_inode: 606 gfs2_consist_inode(GFS2_I(inode)); 607 return ERR_PTR(-EIO); 608 } 609 610 static int dirent_check_reclen(struct gfs2_inode *dip, 611 const struct gfs2_dirent *d, const void *end_p) 612 { 613 const void *ptr = d; 614 u16 rec_len = be16_to_cpu(d->de_rec_len); 615 616 if (unlikely(rec_len < sizeof(struct gfs2_dirent))) 617 goto broken; 618 ptr += rec_len; 619 if (ptr < end_p) 620 return rec_len; 621 if (ptr == end_p) 622 return -ENOENT; 623 broken: 624 gfs2_consist_inode(dip); 625 return -EIO; 626 } 627 628 /** 629 * dirent_next - Next dirent 630 * @dip: the directory 631 * @bh: The buffer 632 * @dent: Pointer to list of dirents 633 * 634 * Returns: 0 on success, error code otherwise 635 */ 636 637 static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh, 638 struct gfs2_dirent **dent) 639 { 640 struct gfs2_dirent *cur = *dent, *tmp; 641 char *bh_end = bh->b_data + bh->b_size; 642 int ret; 643 644 ret = dirent_check_reclen(dip, cur, bh_end); 645 if (ret < 0) 646 return ret; 647 648 tmp = (void *)cur + ret; 649 ret = dirent_check_reclen(dip, tmp, bh_end); 650 if (ret == -EIO) 651 return ret; 652 653 /* Only the first dent could ever have de_inum.no_addr == 0 */ 654 if (gfs2_dirent_sentinel(tmp)) { 655 gfs2_consist_inode(dip); 656 return -EIO; 657 } 658 659 *dent = tmp; 660 return 0; 661 } 662 663 /** 664 * dirent_del - Delete a dirent 665 * @dip: The GFS2 inode 666 * @bh: The buffer 667 * @prev: The previous dirent 668 * @cur: The current dirent 669 * 670 */ 671 672 static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh, 673 struct gfs2_dirent *prev, struct gfs2_dirent *cur) 674 { 675 u16 cur_rec_len, prev_rec_len; 676 677 if (gfs2_dirent_sentinel(cur)) { 678 gfs2_consist_inode(dip); 679 return; 680 } 681 682 gfs2_trans_add_meta(dip->i_gl, bh); 683 684 /* If there is no prev entry, this is the first entry in the block. 
	   The de_rec_len is already as big as it needs to be.  Just zero
	   out the inode number and return.  */

	if (!prev) {
		cur->de_inum.no_addr = 0;
		cur->de_inum.no_formal_ino = 0;
		return;
	}

	/*  Combine this dentry with the previous one.  */

	prev_rec_len = be16_to_cpu(prev->de_rec_len);
	cur_rec_len = be16_to_cpu(cur->de_rec_len);

	if ((char *)prev + prev_rec_len != (char *)cur)
		gfs2_consist_inode(dip);
	if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
		gfs2_consist_inode(dip);

	prev_rec_len += cur_rec_len;
	prev->de_rec_len = cpu_to_be16(prev_rec_len);
}


static struct gfs2_dirent *do_init_dirent(struct inode *inode,
					  struct gfs2_dirent *dent,
					  const struct qstr *name,
					  struct buffer_head *bh,
					  unsigned offset)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dirent *ndent;
	unsigned totlen;

	totlen = be16_to_cpu(dent->de_rec_len);
	BUG_ON(offset + name->len > totlen);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ndent = (struct gfs2_dirent *)((char *)dent + offset);
	dent->de_rec_len = cpu_to_be16(offset);
	gfs2_qstr2dirent(name, totlen - offset, ndent);
	return ndent;
}


/*
 * Takes a dent from which to grab space as an argument. Returns the
 * newly created dent.
 */
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
					    struct gfs2_dirent *dent,
					    const struct qstr *name,
					    struct buffer_head *bh)
{
	unsigned offset = 0;

	if (!gfs2_dirent_sentinel(dent))
		offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	return do_init_dirent(inode, dent, name, bh, offset);
}

static struct gfs2_dirent *gfs2_dirent_split_alloc(struct inode *inode,
						   struct buffer_head *bh,
						   const struct qstr *name,
						   void *ptr)
{
	struct gfs2_dirent *dent;
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
				gfs2_dirent_find_offset, name, ptr);
	if (!dent || IS_ERR(dent))
		return dent;
	return do_init_dirent(inode, dent, name, bh,
			      (unsigned)(ptr - (void *)dent));
}

static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
		    struct buffer_head **bhp)
{
	int error;

	error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, 0, bhp);
	if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
		/* pr_info("block num=%llu\n", leaf_no); */
		error = -EIO;
	}

	return error;
}

/**
 * get_leaf_nr - Get a leaf number associated with the index
 * @dip: The GFS2 inode
 * @index: hash table index of the targeted leaf
 * @leaf_out: Resulting leaf block number
 *
 * Returns: 0 on success, error code otherwise
 */

static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
		       u64 *leaf_out)
{
	__be64 *hash;

	hash = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(hash))
		return PTR_ERR(hash);
	*leaf_out = be64_to_cpu(*(hash + index));
	return 0;
}

static int get_first_leaf(struct gfs2_inode *dip, u32 index,
			  struct buffer_head **bh_out)
{
	u64 leaf_no;
	int error;

	error = get_leaf_nr(dip, index, &leaf_no);
	if (!error)
		error = get_leaf(dip, leaf_no, bh_out);

	return error;
}

static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned hsize = 1 << ip->i_depth;
		unsigned index;
		u64 ln;
		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);
		}

		index = name->hash >> (32 - ip->i_depth);
		error = get_first_leaf(ip, index, &bh);
		if (error)
			return ERR_PTR(error);
		do {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			if (dent)
				goto got_dent;
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);
			brelse(bh);
			if (!ln)
				break;

			error = get_leaf(ip, ln, &bh);
		} while(!error);

		return error ? ERR_PTR(error) : NULL;
	}


	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
	if (unlikely(dent == NULL || IS_ERR(dent))) {
		brelse(bh);
		bh = NULL;
	}
	*pbh = bh;
	return dent;
}

static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int n = 1;
	u64 bn;
	int error;
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	struct gfs2_dirent *dent;
	struct qstr name = { .name = "" };
	struct timespec tv = CURRENT_TIME;

	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
	if (error)
		return NULL;
	bh = gfs2_meta_new(ip->i_gl, bn);
	if (!bh)
		return NULL;

	gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
	leaf = (struct gfs2_leaf *)bh->b_data;
	leaf->lf_depth = cpu_to_be16(depth);
	leaf->lf_entries = 0;
	leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
	leaf->lf_next = 0;
	leaf->lf_inode = cpu_to_be64(ip->i_no_addr);
	leaf->lf_dist = cpu_to_be32(1);
	leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
	leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
	dent = (struct gfs2_dirent *)(leaf+1);
	gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
	*pbh = bh;
	return leaf;
}

/**
 * dir_make_exhash - Convert a stuffed directory into an ExHash directory
 * @dip: The GFS2 inode
 *
 * Returns: 0 on success, error code otherwise
 */

static int dir_make_exhash(struct inode *inode)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_dirent *dent;
	struct qstr args;
	struct buffer_head *bh, *dibh;
	struct gfs2_leaf *leaf;
	int y;
	u32 x;
	__be64 *lp;
	u64 bn;
	int error;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	/*  Turn over a new leaf  */

	leaf = new_leaf(inode, &bh, 0);
	if (!leaf)
		return -ENOSPC;
	bn = bh->b_blocknr;

	gfs2_assert(sdp, dip->i_entries < (1 << 16));
	leaf->lf_entries = cpu_to_be16(dip->i_entries);

	/*  Copy dirents  */

	gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
			      sizeof(struct gfs2_dinode));

	/*  Find last entry  */

	x = 0;
	args.len = bh->b_size - sizeof(struct gfs2_dinode) +
		   sizeof(struct gfs2_leaf);
	args.name = bh->b_data;
	dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
				gfs2_dirent_last, &args, NULL);
	if (!dent) {
		brelse(bh);
		brelse(dibh);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		brelse(bh);
		brelse(dibh);
		return PTR_ERR(dent);
	}

	/*  Adjust the last dirent's record length
	   (Remember that dent still points to the last entry.)  */

	dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
		sizeof(struct gfs2_dinode) -
		sizeof(struct gfs2_leaf));

	brelse(bh);

	/*  We're done with the new leaf block, now setup the new
	    hash table.  */

	gfs2_trans_add_meta(dip->i_gl, dibh);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));

	for (x = sdp->sd_hash_ptrs; x--; lp++)
		*lp = cpu_to_be64(bn);

	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
	gfs2_add_inode_blocks(&dip->i_inode, 1);
	dip->i_diskflags |= GFS2_DIF_EXHASH;

	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
	dip->i_depth = y;

	gfs2_dinode_out(dip, dibh->b_data);

	brelse(dibh);

	return 0;
}

/**
 * dir_split_leaf - Split a leaf block into two
 * @inode: The directory inode to be split
 * @name: name of the dirent we're trying to insert
 *
 * Returns: 0 on success, error code on failure
 */

static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;
	u64 bn, leaf_no;
	__be64 *lp;
	u32 index;
	int x, moved = 0;
	int error;

	index = name->hash >> (32 - dip->i_depth);
	error = get_leaf_nr(dip, index, &leaf_no);
	if (error)
		return error;

	/*  Get the old leaf block  */
	error = get_leaf(dip, leaf_no, &obh);
	if (error)
		return error;

	oleaf = (struct gfs2_leaf *)obh->b_data;
	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
		brelse(obh);
		return 1; /* can't split */
	}

	gfs2_trans_add_meta(dip->i_gl, obh);

	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
	if (!nleaf) {
		brelse(obh);
		return -ENOSPC;
	}
	bn = nbh->b_blocknr;

	/*  Compute the start and len of leaf pointers in the hash table.  */
	len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		pr_warn("i_depth %u lf_depth %u index %u\n",
			dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);
		error = -EIO;
		goto fail_brelse;
	}

	start = (index & ~(len - 1));

	/* Change the pointers.
	   Don't bother distinguishing stuffed from non-stuffed.
	   This code is complicated enough already.  */
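	/* The lower half of the hash table pointers that covered the old
	   leaf is redirected to the new leaf below; entries whose hash
	   falls below the divider are then moved across to it.  */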
	lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
	if (!lp) {
		error = -ENOMEM;
		goto fail_brelse;
	}

	/*  Change the pointers  */
	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	gfs2_dir_hash_inval(dip);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {
		if (error >= 0)
			error = -EIO;
		goto fail_lpfree;
	}

	kfree(lp);

	/*  Compute the divider  */
	divider = (start + half_len) << (32 - dip->i_depth);

	/*  Copy the entries  */
	dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

	do {
		next = dent;
		if (dirent_next(dip, obh, &next))
			next = NULL;

		if (!gfs2_dirent_sentinel(dent) &&
		    be32_to_cpu(dent->de_hash) < divider) {
			struct qstr str;
			void *ptr = ((char *)dent - obh->b_data) + nbh->b_data;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_split_alloc(inode, nbh, &str, ptr);
			if (IS_ERR(new)) {
				error = PTR_ERR(new);
				break;
			}

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			be16_add_cpu(&nleaf->lf_entries, 1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			be16_add_cpu(&oleaf->lf_entries, -1);

			if (!prev)
				prev = dent;

			moved = 1;
		} else {
			prev = dent;
		}
		dent = next;
	} while (dent);

	oleaf->lf_depth = nleaf->lf_depth;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		gfs2_trans_add_meta(dip->i_gl, dibh);
		gfs2_add_inode_blocks(&dip->i_inode, 1);
		gfs2_dinode_out(dip, dibh->b_data);
		brelse(dibh);
	}

	brelse(obh);
	brelse(nbh);

	return error;

fail_lpfree:
	kfree(lp);

fail_brelse:
	brelse(obh);
	brelse(nbh);
	return error;
}

/**
 * dir_double_exhash - Double size of ExHash table
 * @dip: The GFS2 dinode
 *
 * Returns: 0 on success, error code on failure
 */

static int dir_double_exhash(struct gfs2_inode *dip)
{
	struct buffer_head *dibh;
	u32 hsize;
	u32 hsize_bytes;
	__be64 *hc;
	__be64 *hc2, *h;
	int x;
	int error = 0;

	hsize = 1 << dip->i_depth;
	hsize_bytes = hsize * sizeof(__be64);

	hc = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(hc))
		return PTR_ERR(hc);

	hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
	if (hc2 == NULL)
		hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);

	if (!hc2)
		return -ENOMEM;

	h = hc2;
	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_kfree;

	for (x = 0; x < hsize; x++) {
		*h++ = *hc;
		*h++ = *hc;
		hc++;
	}

	error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
	if (error != (hsize_bytes * 2))
		goto fail;

	gfs2_dir_hash_inval(dip);
	dip->i_hash_cache = hc2;
	dip->i_depth++;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);
	return 0;

fail:
	/* Replace original hash table & size */
	gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
	i_size_write(&dip->i_inode, hsize_bytes);
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);
out_kfree:
	kvfree(hc2);
	return error;
}

/**
 * compare_dents - compare directory entries by hash value
 * @a: first dent
 * @b: second dent
 *
 * When comparing the hash entries of @a to @b:
 *   gt: returns 1
 *   lt: returns -1
 *   eq: returns 0
 */

static int compare_dents(const void *a, const void *b)
{
	const struct gfs2_dirent *dent_a, *dent_b;
	u32 hash_a, hash_b;
	int ret = 0;

	dent_a = *(const struct gfs2_dirent **)a;
	hash_a = dent_a->de_cookie;

	dent_b = *(const struct gfs2_dirent **)b;
	hash_b = dent_b->de_cookie;

	if (hash_a > hash_b)
		ret = 1;
	else if (hash_a < hash_b)
		ret = -1;
	else {
		unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
		unsigned int len_b = be16_to_cpu(dent_b->de_name_len);

		if (len_a > len_b)
			ret = 1;
		else if (len_a < len_b)
			ret = -1;
		else
			ret = memcmp(dent_a + 1, dent_b + 1, len_a);
	}

	return ret;
}

/**
 * do_filldir_main - read out directory entries
 * @dip: The GFS2 inode
 * @ctx: what to feed the entries to
 * @darr: an array of struct gfs2_dirent pointers to read
 * @entries: the number of entries in darr
 * @sort_start: index in @darr from which the entries must be sorted by hash
 * @copied: pointer to int that's non-zero if an entry has been copied out
 *
 * Jump through some hoops to make sure that if there are hash collisions,
 * they are read out at the beginning of a buffer.  We want to minimize
 * the possibility that they will fall into different readdir buffers or
 * that someone will want to seek to that location.
 *
 * Returns: errno, >0 if the actor tells you to stop
 */

static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
			   struct gfs2_dirent **darr, u32 entries,
			   u32 sort_start, int *copied)
{
	const struct gfs2_dirent *dent, *dent_next;
	u64 off, off_next;
	unsigned int x, y;
	int run = 0;

	if (sort_start < entries)
		sort(&darr[sort_start], entries - sort_start,
		     sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = dent_next->de_cookie;

	for (x = 0, y = 1; x < entries; x++, y++) {
		dent = dent_next;
		off = off_next;

		if (y < entries) {
			dent_next = darr[y];
			off_next = dent_next->de_cookie;

			if (off < ctx->pos)
				continue;
			ctx->pos = off;

			if (off_next == off) {
				if (*copied && !run)
					return 1;
				run = 1;
			} else
				run = 0;
		} else {
			if (off < ctx->pos)
				continue;
			ctx->pos = off;
		}

		if (!dir_emit(ctx, (const char *)(dent + 1),
				be16_to_cpu(dent->de_name_len),
				be64_to_cpu(dent->de_inum.no_addr),
				be16_to_cpu(dent->de_type)))
			return 1;

		*copied = 1;
	}

	/* Increment the ctx->pos by one, so the next time we come into the
	   do_filldir fxn, we get the next entry instead of the last one in the
	   current leaf */

	ctx->pos++;

	return 0;
}

static void *gfs2_alloc_sort_buffer(unsigned size)
{
	void *ptr = NULL;

	if (size < KMALLOC_MAX_SIZE)
		ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
	if (!ptr)
		ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
	return ptr;
}


static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
			    unsigned leaf_nr, struct gfs2_dirent **darr,
			    unsigned entries)
{
	int sort_id = -1;
	int i;

	for (i = 0; i < entries; i++) {
		unsigned offset;

		darr[i]->de_cookie = be32_to_cpu(darr[i]->de_hash);
		darr[i]->de_cookie = gfs2_disk_hash2offset(darr[i]->de_cookie);

		if (!sdp->sd_args.ar_loccookie)
			continue;
		offset = (char *)(darr[i]) -
			 (bh->b_data + gfs2_dirent_offset(bh->b_data));
		offset /= GFS2_MIN_DIRENT_SIZE;
		offset += leaf_nr * sdp->sd_max_dents_per_leaf;
		if (offset >= GFS2_USE_HASH_FLAG ||
		    leaf_nr >= GFS2_USE_HASH_FLAG) {
			darr[i]->de_cookie |= GFS2_USE_HASH_FLAG;
			if (sort_id < 0)
				sort_id = i;
			continue;
		}
		darr[i]->de_cookie &= GFS2_HASH_INDEX_MASK;
		darr[i]->de_cookie |= offset;
	}
	return sort_id;
}


static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
			      int *copied, unsigned *depth,
			      u64 leaf_no)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0, entries2 = 0;
	unsigned leaves = 0, leaf = 0, offset, sort_offset;
	struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int error, i, need_sort = 0, sort_id;
	u64 lfn = leaf_no;

	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out;
		lf = (struct gfs2_leaf *)bh->b_data;
		if (leaves == 0)
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		leaves++;
		lfn = be64_to_cpu(lf->lf_next);
		brelse(bh);
	} while(lfn);

	if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {
		need_sort = 1;
		sort_offset = 0;
	}

	if (!entries)
		return 0;

	error = -ENOMEM;
	/*
	 * The extra 99 entries are not normally used, but are a buffer
	 * zone in case the number of entries in the leaf is corrupt.
	 * 99 is the maximum number of entries that can fit in a single
	 * leaf block.
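	 *
	 * The allocation below is shared: the first "leaves" slots hold
	 * the leaf buffer_heads (larr), and the dirent pointers used for
	 * sorting (darr) follow immediately after them.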
	 */
	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
	if (!larr)
		goto out;
	darr = (struct gfs2_dirent **)(larr + leaves);
	g.pdent = (const struct gfs2_dirent **)darr;
	g.offset = 0;
	lfn = leaf_no;

	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out_free;
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			offset = g.offset;
			entries2 += be16_to_cpu(lf->lf_entries);
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (IS_ERR(dent))
				goto out_free;
			if (entries2 != g.offset) {
				fs_warn(sdp, "Number of entries corrupt in dir "
						"leaf %llu, entries2 (%u) != "
						"g.offset (%u)\n",
					(unsigned long long)bh->b_blocknr,
					entries2, g.offset);

				error = -EIO;
				goto out_free;
			}
			error = 0;
			sort_id = gfs2_set_cookies(sdp, bh, leaf, &darr[offset],
						   be16_to_cpu(lf->lf_entries));
			if (!need_sort && sort_id >= 0) {
				need_sort = 1;
				sort_offset = offset + sort_id;
			}
			larr[leaf++] = bh;
		} else {
			larr[leaf++] = NULL;
			brelse(bh);
		}
	} while(lfn);

	BUG_ON(entries2 != entries);
	error = do_filldir_main(ip, ctx, darr, entries, need_sort ?
				sort_offset : entries, copied);
out_free:
	for(i = 0; i < leaf; i++)
		if (larr[i])
			brelse(larr[i]);
	kvfree(larr);
out:
	return error;
}

/**
 * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
 *
 * Note: we can't calculate each index like dir_e_read can because we don't
 * have the leaf, and therefore we don't have the depth, and therefore we
 * don't have the length. So we have to just read enough ahead to make up
 * for the loss of information.
 */
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
			       struct file_ra_state *f_ra)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	u64 blocknr = 0, last;
	unsigned count;

	/* First check if we've already read-ahead for the whole range. */
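	/* Read-ahead covers at most MAX_RA_BLOCKS hash table entries per
	   call; consecutive pointers that refer to the same leaf block are
	   only submitted once. */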
	if (index + MAX_RA_BLOCKS < f_ra->start)
		return;

	f_ra->start = max((pgoff_t)index, f_ra->start);
	for (count = 0; count < MAX_RA_BLOCKS; count++) {
		if (f_ra->start >= hsize) /* if exceeded the hash table */
			break;

		last = blocknr;
		blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
		f_ra->start++;
		if (blocknr == last)
			continue;

		bh = gfs2_getbuf(gl, blocknr, 1);
		if (trylock_buffer(bh)) {
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);
				brelse(bh);
				continue;
			}
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READA | REQ_META, bh);
			continue;
		}
		brelse(bh);
	}
}

/**
 * dir_e_read - Reads the entries from a directory into a filldir buffer
 * @dip: dinode pointer
 * @ctx: actor to feed the entries to
 *
 * Returns: errno
 */

static int dir_e_read(struct inode *inode, struct dir_context *ctx,
		      struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	u32 hsize, len = 0;
	u32 hash, index;
	__be64 *lp;
	int copied = 0;
	int error = 0;
	unsigned depth = 0;

	hsize = 1 << dip->i_depth;
	hash = gfs2_dir_offset2hash(ctx->pos);
	index = hash >> (32 - dip->i_depth);

	if (dip->i_hash_cache == NULL)
		f_ra->start = 0;
	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	gfs2_dir_readahead(inode, hsize, index, f_ra);

	while (index < hsize) {
		error = gfs2_dir_read_leaf(inode, ctx,
					   &copied, &depth,
					   be64_to_cpu(lp[index]));
		if (error)
			break;

		len = 1 << (dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

	if (error > 0)
		error = 0;
	return error;
}

int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
		  struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct dirent_gather g;
	struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;
	int copied = 0;
	int error;

	if (!dip->i_entries)
		return 0;

	if (dip->i_diskflags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, ctx, f_ra);

	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	error = -ENOMEM;
	/* 96 is max number of dirents which can be stuffed into an inode */
	darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
	if (darr) {
		g.pdent = (const struct gfs2_dirent **)darr;
		g.offset = 0;
		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
		if (IS_ERR(dent)) {
			error = PTR_ERR(dent);
			goto out;
		}
		if (dip->i_entries != g.offset) {
			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
				"ip->i_entries (%u) != g.offset (%u)\n",
				(unsigned long long)dip->i_no_addr,
				dip->i_entries,
				g.offset);
			error = -EIO;
			goto out;
		}
		gfs2_set_cookies(sdp, dibh, 0, darr, dip->i_entries);
		error = do_filldir_main(dip, ctx, darr,
					dip->i_entries, 0, &copied);
out:
		kfree(darr);
	}

	if (error > 0)
		error = 0;

	brelse(dibh);

	return error;
}

/**
 * gfs2_dir_search - Search a directory
 * @dip: The GFS2 dir inode
 * @name: The name we are looking up
 * @fail_on_exist: Fail if the name exists rather than looking it up
 *
 * This routine searches a directory for a file or another directory.
 * Assumes a glock is held on dip.
 *
 * Returns: errno
 */

struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
			      bool fail_on_exist)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	u64 addr, formal_ino;
	u16 dtype;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		struct inode *inode;
		u16 rahead;

		if (IS_ERR(dent))
			return ERR_CAST(dent);
		dtype = be16_to_cpu(dent->de_type);
		rahead = be16_to_cpu(dent->de_rahead);
		addr = be64_to_cpu(dent->de_inum.no_addr);
		formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);
		brelse(bh);
		if (fail_on_exist)
			return ERR_PTR(-EEXIST);
		inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino, 0);
		if (!IS_ERR(inode))
			GFS2_I(inode)->i_rahead = rahead;
		return inode;
	}
	return ERR_PTR(-ENOENT);
}

int gfs2_dir_check(struct inode *dir, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int ret = -ENOENT;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		if (IS_ERR(dent))
			return PTR_ERR(dent);
		if (ip) {
			if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
				goto out;
			if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
			    ip->i_no_formal_ino)
				goto out;
			if (unlikely(IF2DT(ip->i_inode.i_mode) !=
			    be16_to_cpu(dent->de_type))) {
				gfs2_consist_inode(GFS2_I(dir));
				ret = -EIO;
				goto out;
			}
		}
		ret = 0;
out:
		brelse(bh);
	}
	return ret;
}

/**
 * dir_new_leaf - Add a new leaf onto hash chain
 * @inode: The directory
 * @name: The name we are adding
 *
 * This adds a new dir leaf onto an existing leaf when there is not
 * enough space to add a new dir entry. This is a last resort after
 * we've expanded the hash table to max size and also split existing
 * leaf blocks, so it will only occur for very large directories.
 *
 * The dist parameter is set to 1 for leaf blocks directly attached
 * to the hash table, 2 for one layer of indirection, 3 for two layers
 * etc. We are thus able to tell the difference between an old leaf
 * with dist set to zero (i.e. "don't know") and a new one where we
 * set this information for debug/fsck purposes.
 *
 * Returns: 0 on success, or -ve on error
 */

static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;
	u32 dist = 1;
	int error;
	u32 index;
	u64 bn;

	index = name->hash >> (32 - ip->i_depth);
	error = get_first_leaf(ip, index, &obh);
	if (error)
		return error;
	do {
		dist++;
		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);
		if (!bn)
			break;
		brelse(obh);
		error = get_leaf(ip, bn, &obh);
		if (error)
			return error;
	} while(1);

	gfs2_trans_add_meta(ip->i_gl, obh);

	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
	if (!leaf) {
		brelse(obh);
		return -ENOSPC;
	}
	leaf->lf_dist = cpu_to_be32(dist);
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
	brelse(bh);
	brelse(obh);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return error;
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_add_inode_blocks(&ip->i_inode, 1);
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	return 0;
}

static u16 gfs2_inode_ra_len(const struct gfs2_inode *ip)
{
	u64 where = ip->i_no_addr + 1;
	if (ip->i_eattr == where)
		return 1;
	return 0;
}

/**
 * gfs2_dir_add - Add new filename into directory
 * @inode: The directory inode
 * @name: The new name
 * @nip: The GFS2 inode to be linked in to the directory
 * @da: The directory addition info
 *
 * If the call to gfs2_diradd_alloc_required resulted in there being
 * no need to allocate any new directory blocks, then it will contain
 * a pointer to the directory entry and the bh in which it resides. We
 * can use that without having to repeat the search. If there was no
 * free space, then we must now create more space.
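 *
 * When more space is needed, the loop below creates it in this order:
 * convert a linear directory to exhash, split the target leaf, double
 * the hash table and split again, and finally chain a new leaf block
 * onto the existing one.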
 *
 * Returns: 0 on success, error code on failure
 */

int gfs2_dir_add(struct inode *inode, const struct qstr *name,
		 const struct gfs2_inode *nip, struct gfs2_diradd *da)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh = da->bh;
	struct gfs2_dirent *dent = da->dent;
	struct timespec tv;
	struct gfs2_leaf *leaf;
	int error;

	while(1) {
		if (da->bh == NULL) {
			dent = gfs2_dirent_search(inode, name,
						  gfs2_dirent_find_space, &bh);
		}
		if (dent) {
			if (IS_ERR(dent))
				return PTR_ERR(dent);
			dent = gfs2_init_dirent(inode, dent, name, bh);
			gfs2_inum_out(nip, dent);
			dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
			dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
			tv = CURRENT_TIME;
			if (ip->i_diskflags & GFS2_DIF_EXHASH) {
				leaf = (struct gfs2_leaf *)bh->b_data;
				be16_add_cpu(&leaf->lf_entries, 1);
				leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
				leaf->lf_sec = cpu_to_be64(tv.tv_sec);
			}
			da->dent = NULL;
			da->bh = NULL;
			brelse(bh);
			ip->i_entries++;
			ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
			if (S_ISDIR(nip->i_inode.i_mode))
				inc_nlink(&ip->i_inode);
			mark_inode_dirty(inode);
			error = 0;
			break;
		}
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
			error = dir_make_exhash(inode);
			if (error)
				break;
			continue;
		}
		error = dir_split_leaf(inode, name);
		if (error == 0)
			continue;
		if (error < 0)
			break;
		if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
			error = dir_double_exhash(ip);
			if (error)
				break;
			error = dir_split_leaf(inode, name);
			if (error < 0)
				break;
			if (error == 0)
				continue;
		}
		error = dir_new_leaf(inode, name);
		if (!error)
			continue;
		error = -ENOSPC;
		break;
	}
	return error;
}


/**
 * gfs2_dir_del - Delete a directory entry
 * @dip: The GFS2 inode
 * @filename: The filename
 *
 * Returns: 0 on success, error code on failure
 */

int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
{
	const struct qstr *name = &dentry->d_name;
	struct gfs2_dirent *dent, *prev = NULL;
	struct buffer_head *bh;
	struct timespec tv = CURRENT_TIME;

	/* Returns _either_ the entry (if it's first in the block) or the
	   previous entry otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
	if (!dent) {
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);
	}
	/* If not first in block, adjust pointers accordingly */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		prev = dent;
		dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));
	}

	dirent_del(dip, bh, prev, dent);
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
		if (!entries)
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
		leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
		leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	}
	brelse(bh);

	if (!dip->i_entries)
		gfs2_consist_inode(dip);
	dip->i_entries--;
	dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
	if (d_is_dir(dentry))
		drop_nlink(&dip->i_inode);
	mark_inode_dirty(&dip->i_inode);

	return 0;
}

/**
 * gfs2_dir_mvino - Change inode number of directory entry
 * @dip: The GFS2 dir inode
 * @filename: the name of the directory entry to change
 * @nip: the new GFS2 inode the entry is to point at
 * @new_type: the de_type of the new dirent
 *
 * This routine changes the inode number of a directory entry. It's used
 * by rename to change ".." when a directory is moved.
 * Assumes a glock is held on dip.
 *
 * Returns: errno
 */

int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
		   const struct gfs2_inode *nip, unsigned int new_type)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int error;

	dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
	if (!dent) {
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	gfs2_trans_add_meta(dip->i_gl, bh);
	gfs2_inum_out(nip, dent);
	dent->de_type = cpu_to_be16(new_type);

	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		brelse(bh);
		error = gfs2_meta_inode_buffer(dip, &bh);
		if (error)
			return error;
		gfs2_trans_add_meta(dip->i_gl, bh);
	}

	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(dip, bh->b_data);
	brelse(bh);
	return 0;
}

/**
 * leaf_dealloc - Deallocate a directory leaf
 * @dip: the directory
 * @index: the hash table offset in the directory
 * @len: the number of pointers to this leaf
 * @leaf_no: the leaf number
 * @leaf_bh: buffer_head for the starting leaf
 * @last_dealloc: 1 if this is the final dealloc for the leaf, else 0
 *
 * Returns: errno
 */

static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, struct buffer_head *leaf_bh,
			int last_dealloc)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	u64 blk, nblk;
	unsigned int rg_blocks = 0, l_blocks = 0;
	char *ht;
	unsigned int x, size = len * sizeof(u64);
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
	if (ht == NULL)
		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
			       PAGE_KERNEL);
	if (!ht)
		return -ENOMEM;

	error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		goto out;

	/*  Count the number of leaves  */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_rlist;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		gfs2_rlist_add(dip, &rlist, blk);
		l_blocks++;
	}

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
	if (error)
		goto out_rg_gunlock;

	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_end_trans;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		gfs2_free_meta(dip, blk, 1);
		gfs2_add_inode_blocks(&dip->i_inode, -1);
	}

	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {
		if (error >= 0)
			error = -EIO;
		goto out_end_trans;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_meta(dip->i_gl, dibh);
	/* On the last dealloc, make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.)  */
	if (last_dealloc)
		dip->i_inode.i_mode = S_IFREG;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);

out_end_trans:
	gfs2_trans_end(sdp);
out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
	gfs2_quota_unhold(dip);
out:
	kvfree(ht);
	return error;
}

/**
 * gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
 * @dip: the directory
 *
 * Dealloc all on-disk directory leaves to FREEMETA state
 * Change on-disk inode type to "regular file"
 *
 * Returns: errno
 */

int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 hsize, len;
	u32 index = 0, next_index;
	__be64 *lp;
	u64 leaf_no;
	int error = 0, last;

	hsize = 1 << dip->i_depth;

	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	while (index < hsize) {
		leaf_no = be64_to_cpu(lp[index]);
		if (leaf_no) {
			error = get_leaf(dip, leaf_no, &bh);
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
			len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));

			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
			error = leaf_dealloc(dip, index, len, leaf_no, bh,
					     last);
			brelse(bh);
			if (error)
				goto out;
			index = next_index;
		} else
			index++;
	}

	if (index != hsize) {
		gfs2_consist_inode(dip);
		error = -EIO;
	}

out:

	return error;
}

/**
 * gfs2_diradd_alloc_required - find if adding entry will require an allocation
 * @ip: the file being written to
 * @name: the filename that's going to be added
 * @da: The structure to return dir alloc info
 *
 * Returns: 0 if ok, -ve on error
 */

int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
			       struct gfs2_diradd *da)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const unsigned int extra = sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf);
	struct gfs2_dirent *dent;
	struct buffer_head *bh;

	da->nr_blocks = 0;
	da->bh = NULL;
	da->dent = NULL;

	dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
	if (!dent) {
		da->nr_blocks = sdp->sd_max_dirres;
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH) &&
		    (GFS2_DIRENT_SIZE(name->len) < extra))
			da->nr_blocks = 1;
		return 0;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	if (da->save_loc) {
		da->bh = bh;
		da->dent = dent;
	} else {
		brelse(bh);
	}
	return 0;
}