// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 */

/*
 *	jfs_dtree.c: directory B+-tree manager
 *
 * B+-tree with variable length key directory:
 *
 * each directory page is structured as an array of 32-byte
 * directory entry slots initialized as a freelist
 * to avoid search/compaction of free space at insertion.
 * when an entry is inserted, a number of slots are allocated
 * from the freelist as required to store variable length data
 * of the entry; when the entry is deleted, slots of the entry
 * are returned to the freelist.
 *
 * leaf entry stores full name as key and file serial number
 * (aka inode number) as data.
 * internal/router entry stores suffix compressed name
 * as key and simple extent descriptor as data.
 *
 * each directory page maintains a sorted entry index table
 * which stores the start slot index of sorted entries
 * to allow binary search on the table.
 *
 * directory starts as a root/leaf page in on-disk inode
 * inline data area.
 * when it becomes full, the root is converted to a leaf of an
 * external extent of length 1 block.  each time the first leaf
 * becomes full, it is extended rather than split (its size is
 * doubled), until its length becomes 4 KBytes; from then on the
 * extent is split with a new 4 KByte extent when it becomes full,
 * to reduce external fragmentation of small directories.
 *
 * this layout also supports linear scan of the directory in pieces
 * by readdir().
 *
 *
 *	case-insensitive directory file system
 *
 * names are stored with their original case in leaf entries,
 * but sorted, searched and compared in case-insensitive (uppercase) order
 * (i.e., both search key and entry key are folded for search/compare):
 * (note that case-sensitive order is BROKEN in storage, e.g.,
 *  sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad
 *
 *  entries which fold to the same key make up an equivalence class
 *  whose members are stored as a contiguous cluster (may cross page boundary)
 *  but whose order is arbitrary and acts as duplicates, e.g.,
 *  abc, Abc, aBc, abC)
 *
 * once a match is found at a leaf, a forward/backward scan is required,
 * either for duplicates in a case-insensitive search,
 * or for the exact match in a case-sensitive search.
 *
 * router entries must be created/stored in case-insensitive (uppercase)
 * form in internal entries:
 * (the rightmost key of the left page and the leftmost key of the right
 *  page are folded, and their suffix compression is propagated as the
 *  router key in the parent)
 * (e.g., if a split occurs between <abc> and <aBd>, <ABD> rather than <aB>
 *  should be made the router key for the split)
 *
 * case-insensitive search:
 *
 *	fold search key;
 *
 *	case-insensitive search of B-tree:
 *	for internal entry, router key is already folded;
 *	for leaf entry, fold the entry key before comparison.
 *
 *	if (leaf entry case-insensitive match found)
 *		if (next entry satisfies case-insensitive match)
 *			return EDUPLICATE;
 *		if (prev entry satisfies case-insensitive match)
 *			return EDUPLICATE;
 *		return match;
 *	else
 *		return no match;
 *
 *	serialization:
 * the target directory inode lock is held on entry/exit of all main
 * directory service routines.
 *
 *	log based recovery:
 */
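/*
 * illustrative example of the growth sequence described above (assuming
 * a 1 KByte block size; other block sizes scale the same way): the
 * in-line root leaf overflows into a 1 KByte extent, which is then
 * extended to 2 KBytes and to 4 KBytes as it fills; only after that do
 * further insertions split off additional 4 KByte extents.
 */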
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dmap.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"

/* dtree split parameter */
struct dtsplit {
	struct metapage *mp;
	s16 index;
	s16 nslot;
	struct component_name *key;
	ddata_t *data;
	struct pxdlist *pxdlist;
};

#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)

/* get page buffer for specified block address */
#define DT_GETPAGE(IP, BN, MP, SIZE, P, RC)				\
do {									\
	BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot);	\
	if (!(RC)) {							\
		if (((P)->header.nextindex >				\
		     (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \
		    ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) ||	\
			      ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \
			BT_PUTPAGE(MP);					\
			jfs_error((IP)->i_sb,				\
				  "DT_GETPAGE: dtree page corrupt\n");	\
			MP = NULL;					\
			RC = -EIO;					\
		}							\
	}								\
} while (0)

/* for consistency */
#define DT_PUTPAGE(MP) BT_PUTPAGE(MP)

#define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
	BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)

/*
 * forward references
 */
static int dtSplitUp(tid_t tid, struct inode *ip,
		     struct dtsplit * split, struct btstack * btstack);

static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
		       struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);

static int dtExtendPage(tid_t tid, struct inode *ip,
			struct dtsplit * split, struct btstack * btstack);

static int dtSplitRoot(tid_t tid, struct inode *ip,
		       struct dtsplit * split, struct metapage ** rmpp);

static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
		      dtpage_t * fp, struct btstack * btstack);

static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);

static int dtReadFirst(struct inode *ip, struct btstack * btstack);

static int dtReadNext(struct inode *ip,
		      loff_t * offset, struct btstack * btstack);

static int dtCompare(struct component_name * key, dtpage_t * p, int si);

static int ciCompare(struct component_name * key, dtpage_t * p, int si,
		     int flag);

static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
		     int flag);

static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
			      int ri, struct component_name * key, int flag);

static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
			  ddata_t * data, struct dt_lock **);

static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
			struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
			int do_index);

static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);

static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);

static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);

#define ciToUpper(c)	UniStrupr((c)->name)
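/*
 * a sketch of the page get/put pattern used throughout this file
 * (illustrative only; PSIZE is replaced by the actual extent size
 * where the page is smaller than 4 KBytes):
 *
 *	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
 *	if (rc)
 *		return rc;
 *	... operate on p ...
 *	DT_PUTPAGE(mp);
 */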
/*
 *	read_index_page()
 *
 *	Reads a page of a directory's index table.
 *	Having metadata mapped into the directory inode's address space
 *	presents a multitude of problems.  We avoid this by mapping to
 *	the absolute address space outside of the *_metapage routines
 */
static struct metapage *read_index_page(struct inode *inode, s64 blkno)
{
	int rc;
	s64 xaddr;
	int xflag;
	s32 xlen;

	rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
	if (rc || (xaddr == 0))
		return NULL;

	return read_metapage(inode, xaddr, PSIZE, 1);
}

/*
 *	get_index_page()
 *
 *	Same as read_index_page(), but gets a new page without reading
 */
static struct metapage *get_index_page(struct inode *inode, s64 blkno)
{
	int rc;
	s64 xaddr;
	int xflag;
	s32 xlen;

	rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
	if (rc || (xaddr == 0))
		return NULL;

	return get_metapage(inode, xaddr, PSIZE, 1);
}

/*
 *	find_index()
 *
 *	Returns a pointer to the directory table entry for the specified
 *	index, and returns the dtree page containing it through *mp.
 *
 *	mp must be released by caller.
 */
static struct dir_table_slot *find_index(struct inode *ip, u32 index,
					 struct metapage ** mp, s64 *lblock)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	s64 blkno;
	s64 offset;
	int page_offset;
	struct dir_table_slot *slot;
	static int maxWarnings = 10;

	if (index < 2) {
		if (maxWarnings) {
			jfs_warn("find_index called with index = %d", index);
			maxWarnings--;
		}
		return NULL;
	}

	if (index >= jfs_ip->next_index) {
		jfs_warn("find_index called with index >= next_index");
		return NULL;
	}

	if (jfs_dirtable_inline(ip)) {
		/*
		 * Inline directory table
		 */
		*mp = NULL;
		slot = &jfs_ip->i_dirtable[index - 2];
	} else {
		offset = (index - 2) * sizeof(struct dir_table_slot);
		page_offset = offset & (PSIZE - 1);
		blkno = ((offset + 1) >> L2PSIZE) <<
		    JFS_SBI(ip->i_sb)->l2nbperpage;

		if (*mp && (*lblock != blkno)) {
			release_metapage(*mp);
			*mp = NULL;
		}
		if (!(*mp)) {
			*lblock = blkno;
			*mp = read_index_page(ip, blkno);
		}
		if (!(*mp)) {
			jfs_err("find_index: error reading directory table");
			return NULL;
		}

		slot =
		    (struct dir_table_slot *) ((char *) (*mp)->data +
					       page_offset);
	}
	return slot;
}

static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
			      u32 index)
{
	struct tlock *tlck;
	struct linelock *llck;
	struct lv *lv;

	tlck = txLock(tid, ip, mp, tlckDATA);
	llck = (struct linelock *) tlck->lock;

	if (llck->index >= llck->maxcnt)
		llck = txLinelock(llck);
	lv = &llck->lv[llck->index];

	/*
	 * Linelock slot size is twice the size of directory table
	 * slot size.  512 entries per page.
	 */
	lv->offset = ((index - 2) & 511) >> 1;
	lv->length = 1;
	llck->index++;
}
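/*
 * worked example for lock_index() above (illustrative): for index 100,
 * (100 - 2) & 511 = 98 and 98 >> 1 = 49, i.e. directory table slots 98
 * and 99 of the page share linelock slot 49.
 */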
/*
 *	add_index()
 *
 *	Adds an entry to the directory index table.  This is used to provide
 *	each directory entry with a persistent index in which to resume
 *	directory traversals
 */
static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	u64 blkno;
	struct dir_table_slot *dirtab_slot;
	u32 index;
	struct linelock *llck;
	struct lv *lv;
	struct metapage *mp;
	s64 offset;
	uint page_offset;
	struct tlock *tlck;
	s64 xaddr;

	ASSERT(DO_INDEX(ip));

	if (jfs_ip->next_index < 2) {
		jfs_warn("add_index: next_index = %d.  Resetting!",
			 jfs_ip->next_index);
		jfs_ip->next_index = 2;
	}

	index = jfs_ip->next_index++;

	if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
		/*
		 * i_size reflects size of index table, or 8 bytes per entry.
		 */
		ip->i_size = (loff_t) (index - 1) << 3;

		/*
		 * dir table fits inline within inode
		 */
		dirtab_slot = &jfs_ip->i_dirtable[index-2];
		dirtab_slot->flag = DIR_INDEX_VALID;
		dirtab_slot->slot = slot;
		DTSaddress(dirtab_slot, bn);

		set_cflag(COMMIT_Dirtable, ip);

		return index;
	}
	if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
		struct dir_table_slot temp_table[12];

		/*
		 * It's time to move the inline table to an external
		 * page and begin to build the xtree
		 */
		if (dquot_alloc_block(ip, sbi->nbperpage))
			goto clean_up;
		if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
			dquot_free_block(ip, sbi->nbperpage);
			goto clean_up;
		}

		/*
		 * Save the table, we're going to overwrite it with the
		 * xtree root
		 */
		memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));

		/*
		 * Initialize empty x-tree
		 */
		xtInitRoot(tid, ip);

		/*
		 * Add the first block to the xtree
		 */
		if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
			/* This really shouldn't fail */
			jfs_warn("add_index: xtInsert failed!");
			memcpy(&jfs_ip->i_dirtable, temp_table,
			       sizeof (temp_table));
			dbFree(ip, xaddr, sbi->nbperpage);
			dquot_free_block(ip, sbi->nbperpage);
			goto clean_up;
		}
		ip->i_size = PSIZE;

		mp = get_index_page(ip, 0);
		if (!mp) {
			jfs_err("add_index: get_metapage failed!");
			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
			memcpy(&jfs_ip->i_dirtable, temp_table,
			       sizeof (temp_table));
			goto clean_up;
		}
		tlck = txLock(tid, ip, mp, tlckDATA);
		llck = (struct linelock *) & tlck->lock;
		ASSERT(llck->index == 0);
		lv = &llck->lv[0];

		lv->offset = 0;
		lv->length = 6;	/* tlckDATA slot size is 16 bytes */
		llck->index++;

		memcpy(mp->data, temp_table, sizeof(temp_table));

		mark_metapage_dirty(mp);
		release_metapage(mp);

		/*
		 * Logging is now directed by xtree tlocks
		 */
		clear_cflag(COMMIT_Dirtable, ip);
	}
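	/*
	 * directory table slots are 8 bytes each, so the slot for a given
	 * index lives at byte offset (index - 2) * 8.  worked example
	 * (illustrative): index 514 -> offset 4096, i.e. page_offset 0,
	 * the first slot of a new index page, which is the case handled
	 * below by inserting a fresh block into the xtree.
	 */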
	offset = (index - 2) * sizeof(struct dir_table_slot);
	page_offset = offset & (PSIZE - 1);
	blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
	if (page_offset == 0) {
		/*
		 * This will be the beginning of a new page
		 */
		xaddr = 0;
		if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
			jfs_warn("add_index: xtInsert failed!");
			goto clean_up;
		}
		ip->i_size += PSIZE;

		if ((mp = get_index_page(ip, blkno)))
			memset(mp->data, 0, PSIZE);	/* Just looks better */
		else
			xtTruncate(tid, ip, offset, COMMIT_PWMAP);
	} else
		mp = read_index_page(ip, blkno);

	if (!mp) {
		jfs_err("add_index: get/read_metapage failed!");
		goto clean_up;
	}

	lock_index(tid, ip, mp, index);

	dirtab_slot =
	    (struct dir_table_slot *) ((char *) mp->data + page_offset);
	dirtab_slot->flag = DIR_INDEX_VALID;
	dirtab_slot->slot = slot;
	DTSaddress(dirtab_slot, bn);

	mark_metapage_dirty(mp);
	release_metapage(mp);

	return index;

      clean_up:

	jfs_ip->next_index--;

	return 0;
}

/*
 *	free_index()
 *
 *	Marks an entry of the directory index table as free.
 */
static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
{
	struct dir_table_slot *dirtab_slot;
	s64 lblock;
	struct metapage *mp = NULL;

	dirtab_slot = find_index(ip, index, &mp, &lblock);

	if (!dirtab_slot)
		return;

	dirtab_slot->flag = DIR_INDEX_FREE;
	dirtab_slot->slot = dirtab_slot->addr1 = 0;
	dirtab_slot->addr2 = cpu_to_le32(next);

	if (mp) {
		lock_index(tid, ip, mp, index);
		mark_metapage_dirty(mp);
		release_metapage(mp);
	} else
		set_cflag(COMMIT_Dirtable, ip);
}

/*
 *	modify_index()
 *
 *	Changes an entry in the directory index table
 */
static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
			 int slot, struct metapage ** mp, s64 *lblock)
{
	struct dir_table_slot *dirtab_slot;

	dirtab_slot = find_index(ip, index, mp, lblock);

	if (!dirtab_slot)
		return;

	DTSaddress(dirtab_slot, bn);
	dirtab_slot->slot = slot;

	if (*mp) {
		lock_index(tid, ip, *mp, index);
		mark_metapage_dirty(*mp);
	} else
		set_cflag(COMMIT_Dirtable, ip);
}

/*
 *	read_index()
 *
 *	reads a directory table slot
 */
static int read_index(struct inode *ip, u32 index,
		      struct dir_table_slot * dirtab_slot)
{
	s64 lblock;
	struct metapage *mp = NULL;
	struct dir_table_slot *slot;

	slot = find_index(ip, index, &mp, &lblock);
	if (!slot) {
		return -EIO;
	}

	memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));

	if (mp)
		release_metapage(mp);

	return 0;
}
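/*
 * note on the index table helpers above (descriptive): add_index()
 * hands each new directory entry a persistent cookie, modify_index()
 * repoints a cookie when its entry moves to a different page/slot,
 * and free_index() records the index of the entry that now follows
 * (in addr2), so a readdir positioned at a freed cookie can resume
 * at the next entry instead of failing.
 */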
/*
 *	dtSearch()
 *
 * function:
 *	Search for the entry with specified key
 *
 * parameter:
 *
 * return: 0 - search result on stack, leaf page pinned;
 *	   errno - I/O error
 */
int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
	     struct btstack * btstack, int flag)
{
	int rc = 0;
	int cmp = 1;		/* init for empty page */
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	s8 *stbl;
	int base, index, lim;
	struct btframe *btsp;
	pxd_t *pxd;
	int psize = 288;	/* initial in-line directory */
	ino_t inumber;
	struct component_name ciKey;
	struct super_block *sb = ip->i_sb;

	ciKey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
				   GFP_NOFS);
	if (!ciKey.name) {
		rc = -ENOMEM;
		goto dtSearch_Exit2;
	}


	/* uppercase search key for c-i directory */
	UniStrcpy(ciKey.name, key->name);
	ciKey.namlen = key->namlen;

	/* only uppercase if case-insensitive support is on */
	if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
		ciToUpper(&ciKey);
	}
	BT_CLR(btstack);	/* reset stack */

	/* init level count for max pages to split */
	btstack->nsplit = 1;

	/*
	 *	search down tree from root:
	 *
	 * between two consecutive entries <Ki, Pi> and <Kj, Pj> of an
	 * internal page, child page Pi contains entries with keys K,
	 * Ki <= K < Kj.
	 *
	 * if an entry with search key K is not found,
	 * internal page search finds the entry with the largest key Ki
	 * less than K, which points to the child page to search;
	 * leaf page search finds the entry with the smallest key Kj
	 * greater than K, so that the returned index is the position of
	 * the entry to be shifted right for insertion of the new entry.
	 * for an empty tree, the search key is greater than any key of
	 * the tree.
	 *
	 * by convention, root bn = 0.
	 */
	for (bn = 0;;) {
		/* get/pin the page to search */
		DT_GETPAGE(ip, bn, mp, psize, p, rc);
		if (rc)
			goto dtSearch_Exit1;

		/* get sorted entry table of the page */
		stbl = DT_GETSTBL(p);

		/*
		 * binary search with search key K on the current page.
		 */
		for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
			index = base + (lim >> 1);

			if (stbl[index] < 0) {
				rc = -EIO;
				goto out;
			}

			if (p->header.flag & BT_LEAF) {
				/* uppercase leaf name to compare */
				cmp =
				    ciCompare(&ciKey, p, stbl[index],
					      JFS_SBI(sb)->mntflag);
			} else {
				/* router key is in uppercase */

				cmp = dtCompare(&ciKey, p, stbl[index]);


			}
			if (cmp == 0) {
				/*
				 *	search hit
				 */
				/* search hit - leaf page:
				 * return the entry found
				 */
				if (p->header.flag & BT_LEAF) {
					inumber = le32_to_cpu(
			((struct ldtentry *) & p->slot[stbl[index]])->inumber);

					/*
					 * search for JFS_LOOKUP
					 */
					if (flag == JFS_LOOKUP) {
						*data = inumber;
						rc = 0;
						goto out;
					}

					/*
					 * search for JFS_CREATE
					 */
					if (flag == JFS_CREATE) {
						*data = inumber;
						rc = -EEXIST;
						goto out;
					}

					/*
					 * search for JFS_REMOVE or JFS_RENAME
					 */
					if ((flag == JFS_REMOVE ||
					     flag == JFS_RENAME) &&
					    *data != inumber) {
						rc = -ESTALE;
						goto out;
					}

					/*
					 * JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
					 */
					/* save search result */
					*data = inumber;
					btsp = btstack->top;
					btsp->bn = bn;
					btsp->index = index;
					btsp->mp = mp;

					rc = 0;
					goto dtSearch_Exit1;
				}

				/* search hit - internal page:
				 * descend/search its child page
				 */
				goto getChild;
			}

			if (cmp > 0) {
				base = index + 1;
				--lim;
			}
		}

		/*
		 *	search miss
		 *
		 * base is the smallest index with key (Kj) greater than
		 * search key (K) and may be zero or (maxindex + 1) index.
		 */
		/*
		 * search miss - leaf page
		 *
		 * return location of entry (base) where new entry with
		 * search key K is to be inserted.
		 */
		if (p->header.flag & BT_LEAF) {
			/*
			 * search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
			 */
			if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
			    flag == JFS_RENAME) {
				rc = -ENOENT;
				goto out;
			}

			/*
			 * search for JFS_CREATE|JFS_FINDDIR:
			 *
			 * save search result
			 */
			*data = 0;
			btsp = btstack->top;
			btsp->bn = bn;
			btsp->index = base;
			btsp->mp = mp;

			rc = 0;
			goto dtSearch_Exit1;
		}

		/*
		 *	search miss - internal page
		 *
		 * if base is non-zero, decrement base by one to get the parent
		 * entry of the child page to search.
		 */
		index = base ? base - 1 : base;

		/*
		 * go down to child page
		 */
	      getChild:
		/* update max. number of pages to split */
		if (BT_STACK_FULL(btstack)) {
			/* Something's corrupted, mark filesystem dirty so
			 * chkdsk will fix it.
			 */
			jfs_error(sb, "stack overrun!\n");
			BT_STACK_DUMP(btstack);
			rc = -EIO;
			goto out;
		}
		btstack->nsplit++;

		/* push (bn, index) of the parent page/entry */
		BT_PUSH(btstack, bn, index);

		/* get the child page block number */
		pxd = (pxd_t *) & p->slot[stbl[index]];
		bn = addressPXD(pxd);
		psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;

		/* unpin the parent page */
		DT_PUTPAGE(mp);
	}

      out:
	DT_PUTPAGE(mp);

      dtSearch_Exit1:

	kfree(ciKey.name);

      dtSearch_Exit2:

	return rc;
}
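/*
 * note on dtSearch() exit state (descriptive, derived from the code
 * above): on a JFS_LOOKUP hit and on every error path the leaf page is
 * unpinned before returning; on a JFS_CREATE miss, or a hit for
 * JFS_REMOVE/JFS_RENAME/JFS_FINDDIR, the leaf page is left pinned with
 * the result saved on btstack->top for the caller to release.
 */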

/*
 *	dtInsert()
 *
 * function: insert an entry to directory tree
 *
 * parameter:
 *
 * return: 0 - success;
 *	   errno - failure;
 */
int dtInsert(tid_t tid, struct inode *ip,
	 struct component_name * name, ino_t * fsn, struct btstack * btstack)
{
	int rc = 0;
	struct metapage *mp;	/* meta-page buffer */
	dtpage_t *p;		/* base B+-tree index page */
	s64 bn;
	int index;
	struct dtsplit split;	/* split information */
	ddata_t data;
	struct dt_lock *dtlck;
	int n;
	struct tlock *tlck;
	struct lv *lv;

	/*
	 *	retrieve search result
	 *
	 * dtSearch() returns (leaf page pinned, index at which to insert).
	 * n.b. dtSearch() may return index of (maxindex + 1) of
	 * the full page.
	 */
	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
	if (p->header.freelist == 0)
		return -EINVAL;

	/*
	 *	insert entry for new key
	 */
	if (DO_INDEX(ip)) {
		if (JFS_IP(ip)->next_index == DIREND) {
			DT_PUTPAGE(mp);
			return -EMLINK;
		}
		n = NDTLEAF(name->namlen);
		data.leaf.tid = tid;
		data.leaf.ip = ip;
	} else {
		n = NDTLEAF_LEGACY(name->namlen);
		data.leaf.ip = NULL;	/* signifies legacy directory format */
	}
	data.leaf.ino = *fsn;

	/*
	 *	leaf page does not have enough room for new entry:
	 *
	 *	extend/split the leaf page;
	 *
	 * dtSplitUp() will insert the entry and unpin the leaf page.
	 */
	if (n > p->header.freecnt) {
		split.mp = mp;
		split.index = index;
		split.nslot = n;
		split.key = name;
		split.data = &data;
		rc = dtSplitUp(tid, ip, &split, btstack);
		return rc;
	}

	/*
	 *	leaf page does have enough room for new entry:
	 *
	 *	insert the new data entry into the leaf page;
	 */
	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire a transaction lock on the leaf page
	 */
	tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
	dtlck = (struct dt_lock *) & tlck->lock;
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];

	/* linelock header */
	lv->offset = 0;
	lv->length = 1;
	dtlck->index++;

	dtInsertEntry(p, index, name, &data, &dtlck);

	/* linelock stbl of non-root leaf page */
	if (!(p->header.flag & BT_ROOT)) {
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		n = index >> L2DTSLOTSIZE;
		lv->offset = p->header.stblindex + n;
		lv->length =
		    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
		dtlck->index++;
	}

	/* unpin the leaf page */
	DT_PUTPAGE(mp);

	return 0;
}


/*
 *	dtSplitUp()
 *
 * function: propagate insertion bottom up;
 *
 * parameter:
 *
 * return: 0 - success;
 *	   errno - failure;
 *	   leaf page unpinned;
 */
static int dtSplitUp(tid_t tid,
	  struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	int rc = 0;
	struct metapage *smp;
	dtpage_t *sp;		/* split page */
	struct metapage *rmp;
	dtpage_t *rp;		/* new right page split from sp */
	pxd_t rpxd;		/* new right page extent descriptor */
	struct metapage *lmp;
	dtpage_t *lp;		/* left child page */
	int skip;		/* index of entry of insertion */
	struct btframe *parent;	/* parent page entry on traverse stack */
	s64 xaddr, nxaddr;
	int xlen, xsize;
	struct pxdlist pxdlist;
	pxd_t *pxd;
	struct component_name key = { 0, NULL };
	ddata_t *data = split->data;
	int n;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;
	int quota_allocation = 0;

	/* get split page */
	smp = split->mp;
	sp = DT_PAGE(ip, smp);

	key.name = kmalloc_array(JFS_NAME_MAX + 2, sizeof(wchar_t), GFP_NOFS);
	if (!key.name) {
		DT_PUTPAGE(smp);
		rc = -ENOMEM;
		goto dtSplitUp_Exit;
	}

	/*
	 *	split leaf page
	 *
	 * The split routines insert the new entry, and
	 * acquire txLock as appropriate.
	 */
	/*
	 *	split root leaf page:
	 */
	if (sp->header.flag & BT_ROOT) {
		/*
		 * allocate a single extent child page
		 */
		xlen = 1;
		n = sbi->bsize >> L2DTSLOTSIZE;
		n -= (n + 31) >> L2DTSLOTSIZE;	/* stbl size */
		n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
		if (n <= split->nslot)
			xlen++;
		if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
			DT_PUTPAGE(smp);
			goto freeKeyName;
		}

		pxdlist.maxnpxd = 1;
		pxdlist.npxd = 0;
		pxd = &pxdlist.pxd[0];
		PXDaddress(pxd, xaddr);
		PXDlength(pxd, xlen);
		split->pxdlist = &pxdlist;
		rc = dtSplitRoot(tid, ip, split, &rmp);

		if (rc)
			dbFree(ip, xaddr, xlen);
		else
			DT_PUTPAGE(rmp);

		DT_PUTPAGE(smp);

		if (!DO_INDEX(ip))
			ip->i_size = xlen << sbi->l2bsize;

		goto freeKeyName;
	}

	/*
	 *	extend first leaf page
	 *
	 * extend the 1st extent if less than buffer page size
	 * (dtExtendPage() returns leaf page unpinned)
	 */
	pxd = &sp->header.self;
	xlen = lengthPXD(pxd);
	xsize = xlen << sbi->l2bsize;
	if (xsize < PSIZE) {
		xaddr = addressPXD(pxd);
		n = xsize >> L2DTSLOTSIZE;
		n -= (n + 31) >> L2DTSLOTSIZE;	/* stbl size */
		if ((n + sp->header.freecnt) <= split->nslot)
			n = xlen + (xlen << 1);
		else
			n = xlen;

		/* Allocate blocks to quota. */
		rc = dquot_alloc_block(ip, n);
		if (rc)
			goto extendOut;
		quota_allocation += n;

		if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
				    (s64) n, &nxaddr)))
			goto extendOut;

		pxdlist.maxnpxd = 1;
		pxdlist.npxd = 0;
		pxd = &pxdlist.pxd[0];
		PXDaddress(pxd, nxaddr);
		PXDlength(pxd, xlen + n);
		split->pxdlist = &pxdlist;
		if ((rc = dtExtendPage(tid, ip, split, btstack))) {
			nxaddr = addressPXD(pxd);
			if (xaddr != nxaddr) {
				/* free relocated extent */
				xlen = lengthPXD(pxd);
				dbFree(ip, nxaddr, (s64) xlen);
			} else {
				/* free extended delta */
				xlen = lengthPXD(pxd) - n;
				xaddr = addressPXD(pxd) + xlen;
				dbFree(ip, xaddr, (s64) n);
			}
		} else if (!DO_INDEX(ip))
			ip->i_size = lengthPXD(pxd) << sbi->l2bsize;


	      extendOut:
		DT_PUTPAGE(smp);
		goto freeKeyName;
	}
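	/*
	 * note on the extension size chosen above (descriptive): n is the
	 * number of blocks added to the current extent, so "n = xlen"
	 * doubles the page, while "n = xlen + (xlen << 1)" quadruples it
	 * when doubling would still not leave room for the new entry.
	 */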
	/*
	 *	split leaf page <sp> into <sp> and a new right page <rp>.
	 *
	 * return <rp> pinned and its extent descriptor <rpxd>
	 */
	/*
	 * allocate new directory page extent and
	 * new index page(s) to cover page split(s)
	 *
	 * allocation hint: ?
	 */
	n = btstack->nsplit;
	pxdlist.maxnpxd = pxdlist.npxd = 0;
	xlen = sbi->nbperpage;
	for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
		if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
			PXDaddress(pxd, xaddr);
			PXDlength(pxd, xlen);
			pxdlist.maxnpxd++;
			continue;
		}

		DT_PUTPAGE(smp);

		/* undo allocation */
		goto splitOut;
	}

	split->pxdlist = &pxdlist;
	if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
		DT_PUTPAGE(smp);

		/* undo allocation */
		goto splitOut;
	}

	if (!DO_INDEX(ip))
		ip->i_size += PSIZE;

	/*
	 *	propagate up the router entry for the leaf page just split
	 *
	 * insert a router entry for the new page into the parent page,
	 * propagate the insert/split up the tree by walking back the stack
	 * of (bn of parent page, index of child page entry in parent page)
	 * that were traversed during the search for the page that split.
	 *
	 * the propagation of insert/split up the tree stops if the root
	 * splits or the page inserted into doesn't have to split to hold
	 * the new entry.
	 *
	 * the parent entry for the split page remains the same, and
	 * a new entry is inserted at its right with the first key and
	 * block number of the new right page.
	 *
	 * There are a maximum of 4 pages pinned at any time:
	 * two children, left parent and right parent (when the parent splits).
	 * keep the child pages pinned while working on the parent.
	 * make sure that all pins are released at exit.
	 */
	while ((parent = BT_POP(btstack)) != NULL) {
		/* parent page specified by stack frame <parent> */

		/* keep current child pages (<lp>, <rp>) pinned */
		lmp = smp;
		lp = sp;

		/*
		 * insert router entry in parent for new right child page <rp>
		 */
		/* get the parent page <sp> */
		DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
		if (rc) {
			DT_PUTPAGE(lmp);
			DT_PUTPAGE(rmp);
			goto splitOut;
		}

		/*
		 * The new key entry goes ONE AFTER the index of parent entry,
		 * because the split was to the right.
		 */
		skip = parent->index + 1;

		/*
		 *	compute the key for the router entry
		 *
		 * key suffix compression:
		 * for internal pages that have leaf pages as children,
		 * retain only what's needed to distinguish between
		 * the new entry and the entry on the page to its left.
		 * If the keys compare equal, retain the entire key.
		 *
		 * note that compression is performed only at computing
		 * router key at the lowest internal level.
		 * further compression of the key between pairs of higher
		 * level internal pages loses too much information and
		 * the search may fail.
		 * (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
		 * results in two adjacent parent entries (a)(xx).
		 * if split occurs between these two entries, and
		 * if compression is applied, the router key of parent entry
		 * of right page (x) will divert search for x into right
		 * subtree and miss x in the left subtree.)
		 *
		 * the entire key must be retained for the next-to-leftmost
		 * internal key at any level of the tree, or search may fail
		 * (e.g., ?)
		 */
		switch (rp->header.flag & BT_TYPE) {
		case BT_LEAF:
			/*
			 * compute the length of prefix for suffix compression
			 * between last entry of left page and first entry
			 * of right page
			 */
			if ((sp->header.flag & BT_ROOT && skip > 1) ||
			    sp->header.prev != 0 || skip > 1) {
				/* compute uppercase router prefix key */
				rc = ciGetLeafPrefixKey(lp,
							lp->header.nextindex-1,
							rp, 0, &key,
							sbi->mntflag);
				if (rc) {
					DT_PUTPAGE(lmp);
					DT_PUTPAGE(rmp);
					DT_PUTPAGE(smp);
					goto splitOut;
				}
			} else {
				/* next to leftmost entry of
				   lowest internal level */

				/* compute uppercase router key */
				dtGetKey(rp, 0, &key, sbi->mntflag);
				key.name[key.namlen] = 0;

				if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
					ciToUpper(&key);
			}

			n = NDTINTERNAL(key.namlen);
			break;

		case BT_INTERNAL:
			dtGetKey(rp, 0, &key, sbi->mntflag);
			n = NDTINTERNAL(key.namlen);
			break;

		default:
			jfs_err("dtSplitUp(): UFO!");
			break;
		}

		/* unpin left child page */
		DT_PUTPAGE(lmp);

		/*
		 * compute the data for the router entry
		 */
		data->xd = rpxd;	/* child page xd */

		/*
		 * parent page is full - split the parent page
		 */
		if (n > sp->header.freecnt) {
			/* init for parent page split */
			split->mp = smp;
			split->index = skip;	/* index at insert */
			split->nslot = n;
			split->key = &key;
			/* split->data = data; */

			/* unpin right child page */
			DT_PUTPAGE(rmp);

			/* The split routines insert the new entry,
			 * acquire txLock as appropriate.
			 * return <rp> pinned and its block number <rbn>.
			 */
			rc = (sp->header.flag & BT_ROOT) ?
			    dtSplitRoot(tid, ip, split, &rmp) :
			    dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
			if (rc) {
				DT_PUTPAGE(smp);
				goto splitOut;
			}

			/* smp and rmp are pinned */
		}
		/*
		 * parent page is not full - insert router entry in parent page
		 */
		else {
			BT_MARK_DIRTY(smp, ip);
			/*
			 * acquire a transaction lock on the parent page
			 */
			tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
			dtlck = (struct dt_lock *) & tlck->lock;
			ASSERT(dtlck->index == 0);
			lv = & dtlck->lv[0];

			/* linelock header */
			lv->offset = 0;
			lv->length = 1;
			dtlck->index++;

			/* linelock stbl of non-root parent page */
			if (!(sp->header.flag & BT_ROOT)) {
				lv++;
				n = skip >> L2DTSLOTSIZE;
				lv->offset = sp->header.stblindex + n;
				lv->length =
				    ((sp->header.nextindex -
				      1) >> L2DTSLOTSIZE) - n + 1;
				dtlck->index++;
			}

			dtInsertEntry(sp, skip, &key, data, &dtlck);

			/* exit propagate up */
			break;
		}
	}

	/* unpin current split and its right page */
	DT_PUTPAGE(smp);
	DT_PUTPAGE(rmp);

	/*
	 * free remaining extents allocated for split
	 */
      splitOut:
	n = pxdlist.npxd;
	pxd = &pxdlist.pxd[n];
	for (; n < pxdlist.maxnpxd; n++, pxd++)
		dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));

      freeKeyName:
	kfree(key.name);

	/* Rollback quota allocation */
	if (rc && quota_allocation)
		dquot_free_block(ip, quota_allocation);

      dtSplitUp_Exit:

	return rc;
}


/*
 *	dtSplitPage()
 *
 * function: Split a non-root page of a btree.
 *
 * parameter:
 *
 * return: 0 - success;
 *	   errno - failure;
 *	   return split and new page pinned;
 */
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
	    struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
	int rc = 0;
	struct metapage *smp;
	dtpage_t *sp;
	struct metapage *rmp;
	dtpage_t *rp;		/* new right page allocated */
	s64 rbn;		/* new right page block number */
	struct metapage *mp;
	dtpage_t *p;
	s64 nextbn;
	struct pxdlist *pxdlist;
	pxd_t *pxd;
	int skip, nextindex, half, left, nxt, off, si;
	struct ldtentry *ldtentry;
	struct idtentry *idtentry;
	u8 *stbl;
	struct dtslot *f;
	int fsi, stblsize;
	int n;
	struct dt_lock *sdtlck, *rdtlck;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *slv, *rlv, *lv;

	/* get split page */
	smp = split->mp;
	sp = DT_PAGE(ip, smp);

	/*
	 * allocate the new right page for the split
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;
	rbn = addressPXD(pxd);
	rmp = get_metapage(ip, rbn, PSIZE, 1);
	if (rmp == NULL)
		return -EIO;

	/* Allocate blocks to quota. */
	rc = dquot_alloc_block(ip, lengthPXD(pxd));
	if (rc) {
		release_metapage(rmp);
		return rc;
	}

	jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);

	BT_MARK_DIRTY(rmp, ip);
	/*
	 * acquire a transaction lock on the new right page
	 */
	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
	rdtlck = (struct dt_lock *) & tlck->lock;

	rp = (dtpage_t *) rmp->data;
	*rpp = rp;
	rp->header.self = *pxd;

	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the split page
	 *
	 * action:
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
	sdtlck = (struct dt_lock *) & tlck->lock;

	/* linelock header of split page */
	ASSERT(sdtlck->index == 0);
	slv = & sdtlck->lv[0];
	slv->offset = 0;
	slv->length = 1;
	sdtlck->index++;

	/*
	 * initialize/update sibling pointers between sp and rp
	 */
	nextbn = le64_to_cpu(sp->header.next);
	rp->header.next = cpu_to_le64(nextbn);
	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
	sp->header.next = cpu_to_le64(rbn);

	/*
	 * initialize new right page
	 */
	rp->header.flag = sp->header.flag;

	/* compute sorted entry table at start of extent data area */
	rp->header.nextindex = 0;
	rp->header.stblindex = 1;

	n = PSIZE >> L2DTSLOTSIZE;
	rp->header.maxslot = n;
	stblsize = (n + 31) >> L2DTSLOTSIZE;	/* in unit of slot */

	/* init freelist */
	fsi = rp->header.stblindex + stblsize;
	rp->header.freelist = fsi;
	rp->header.freecnt = rp->header.maxslot - fsi;

	/*
	 *	sequential append at tail: append without split
	 *
	 * If splitting the last page on a level because of appending
	 * an entry to it (skip is maxentry), it's likely that the access is
	 * sequential.  Adding an empty page on the side of the level is less
	 * work and can push the fill factor much higher than normal.
	 * If we're wrong it's no big deal, we'll just do the split the right
	 * way next time.
	 * (It may look like it's equally easy to do a similar hack for
	 * reverse sorted data, that is, split the tree left,
	 * but it's not.  Be my guest.)
	 */
	if (nextbn == 0 && split->index == sp->header.nextindex) {
		/* linelock header + stbl (first slot) of new page */
		rlv = & rdtlck->lv[rdtlck->index];
		rlv->offset = 0;
		rlv->length = 2;
		rdtlck->index++;

		/*
		 * initialize freelist of new right page
		 */
		f = &rp->slot[fsi];
		for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
			f->next = fsi;
		f->next = -1;

		/* insert entry at the first entry of the new right page */
		dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);

		goto out;
	}

	/*
	 *	non-sequential insert (at possibly middle page)
	 */

	/*
	 * update prev pointer of previous right sibling page;
	 */
	if (nextbn != 0) {
		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
		if (rc) {
			discard_metapage(rmp);
			return rc;
		}

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the next page
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			 tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header of previous right sibling page */
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.prev = cpu_to_le64(rbn);

		DT_PUTPAGE(mp);
	}

	/*
	 * split the data between the split and right pages.
	 */
	skip = split->index;
	half = (PSIZE >> L2DTSLOTSIZE) >> 1;	/* swag */
	left = 0;

	/*
	 *	compute fill factor for split pages
	 *
	 * <nxt> traces the next entry to move to rp
	 * <off> traces the next entry to stay in sp
	 */
	stbl = (u8 *) & sp->slot[sp->header.stblindex];
	nextindex = sp->header.nextindex;
	for (nxt = off = 0; nxt < nextindex; ++off) {
		if (off == skip)
			/* check for fill factor with new entry size */
			n = split->nslot;
		else {
			si = stbl[nxt];
			switch (sp->header.flag & BT_TYPE) {
			case BT_LEAF:
				ldtentry = (struct ldtentry *) & sp->slot[si];
				if (DO_INDEX(ip))
					n = NDTLEAF(ldtentry->namlen);
				else
					n = NDTLEAF_LEGACY(ldtentry->
							   namlen);
				break;

			case BT_INTERNAL:
				idtentry = (struct idtentry *) & sp->slot[si];
				n = NDTINTERNAL(idtentry->namlen);
				break;

			default:
				break;
			}

			++nxt;	/* advance to next entry to move in sp */
		}

		left += n;
		if (left >= half)
			break;
	}

	/* <nxt> points to the 1st entry to move */

	/*
	 *	move entries to right page
	 *
	 * dtMoveEntry() initializes rp and reserves entry for insertion
	 *
	 * split page moved out entries are linelocked;
	 * new/right page moved in entries are linelocked;
	 */
	/* linelock header + stbl of new right page */
	rlv = & rdtlck->lv[rdtlck->index];
	rlv->offset = 0;
	rlv->length = 5;
	rdtlck->index++;

	dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));

	sp->header.nextindex = nxt;

	/*
	 * finalize freelist of new right page
	 */
	fsi = rp->header.freelist;
	f = &rp->slot[fsi];
	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;
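	/*
	 * note on the split point chosen above (descriptive): <half> is
	 * measured in 32-byte slots (64 for a 4 KByte page), and <left>
	 * accumulates the slot cost of the entries that stay in sp, so
	 * the page is split near 50% fill by slot count rather than by
	 * entry count.
	 */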
	/*
	 * Update directory index table for entries now in right page
	 */
	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
		s64 lblock;

		mp = NULL;
		stbl = DT_GETSTBL(rp);
		for (n = 0; n < rp->header.nextindex; n++) {
			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
				     rbn, n, &mp, &lblock);
		}
		if (mp)
			release_metapage(mp);
	}

	/*
	 * the skipped index was on the left page,
	 */
	if (skip <= off) {
		/* insert the new entry in the split page */
		dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);

		/* linelock stbl of split page */
		if (sdtlck->index >= sdtlck->maxcnt)
			sdtlck = (struct dt_lock *) txLinelock(sdtlck);
		slv = & sdtlck->lv[sdtlck->index];
		n = skip >> L2DTSLOTSIZE;
		slv->offset = sp->header.stblindex + n;
		slv->length =
		    ((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
		sdtlck->index++;
	}
	/*
	 * the skipped index was on the right page,
	 */
	else {
		/* adjust the skip index to reflect the new position */
		skip -= nxt;

		/* insert the new entry in the right page */
		dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
	}

      out:
	*rmpp = rmp;
	*rpxdp = *pxd;

	return rc;
}


/*
 *	dtExtendPage()
 *
 * function: extend 1st/only directory leaf page
 *
 * parameter:
 *
 * return: 0 - success;
 *	   errno - failure;
 *	   return extended page pinned;
 */
static int dtExtendPage(tid_t tid,
	     struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
	struct super_block *sb = ip->i_sb;
	int rc;
	struct metapage *smp, *pmp, *mp;
	dtpage_t *sp, *pp;
	struct pxdlist *pxdlist;
	pxd_t *pxd, *tpxd;
	int xlen, xsize;
	int newstblindex, newstblsize;
	int oldstblindex, oldstblsize;
	int fsi, last;
	struct dtslot *f;
	struct btframe *parent;
	int n;
	struct dt_lock *dtlck;
	s64 xaddr, txaddr;
	struct tlock *tlck;
	struct pxd_lock *pxdlock;
	struct lv *lv;
	uint type;
	struct ldtentry *ldtentry;
	u8 *stbl;

	/* get page to extend */
	smp = split->mp;
	sp = DT_PAGE(ip, smp);

	/* get parent/root page */
	parent = BT_POP(btstack);
	DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
	if (rc)
		return (rc);

	/*
	 *	extend the extent
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;

	xaddr = addressPXD(pxd);
	tpxd = &sp->header.self;
	txaddr = addressPXD(tpxd);
	/* in-place extension */
	if (xaddr == txaddr) {
		type = tlckEXTEND;
	}
	/* relocation */
	else {
		type = tlckNEW;

		/* save moved extent descriptor for later free */
		tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckFREEPXD;
		pxdlock->pxd = sp->header.self;
		pxdlock->index = 1;

		/*
		 * Update directory index table to reflect new page address
		 */
		if (DO_INDEX(ip)) {
			s64 lblock;

			mp = NULL;
			stbl = DT_GETSTBL(sp);
			for (n = 0; n < sp->header.nextindex; n++) {
				ldtentry =
				    (struct ldtentry *) & sp->slot[stbl[n]];
				modify_index(tid, ip,
					     le32_to_cpu(ldtentry->index),
					     xaddr, n, &mp, &lblock);
			}
			if (mp)
				release_metapage(mp);
		}
	}
	/*
	 *	extend the page
	 */
	sp->header.self = *pxd;

	jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);

	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the extended/leaf page
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | type);
	dtlck = (struct dt_lock *) & tlck->lock;
	lv = & dtlck->lv[0];

	/* update buffer extent descriptor of extended page */
	xlen = lengthPXD(pxd);
	xsize = xlen << JFS_SBI(sb)->l2bsize;

	/*
	 * copy old stbl to new stbl at start of extended area
	 */
	oldstblindex = sp->header.stblindex;
	oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
	newstblindex = sp->header.maxslot;
	n = xsize >> L2DTSLOTSIZE;
	newstblsize = (n + 31) >> L2DTSLOTSIZE;
	memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
	       sp->header.nextindex);

	/*
	 * in-line extension: linelock old area of extended page
	 */
	if (type == tlckEXTEND) {
		/* linelock header */
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;
		lv++;

		/* linelock new stbl of extended page */
		lv->offset = newstblindex;
		lv->length = newstblsize;
	}
	/*
	 * relocation: linelock whole relocated area
	 */
	else {
		lv->offset = 0;
		lv->length = sp->header.maxslot + newstblsize;
	}

	dtlck->index++;

	sp->header.maxslot = n;
	sp->header.stblindex = newstblindex;
	/* sp->header.nextindex remains the same */

	/*
	 * add old stbl region at head of freelist
	 */
	fsi = oldstblindex;
	f = &sp->slot[fsi];
	last = sp->header.freelist;
	for (n = 0; n < oldstblsize; n++, fsi++, f++) {
		f->next = last;
		last = fsi;
	}
	sp->header.freelist = last;
	sp->header.freecnt += oldstblsize;

	/*
	 * append free region of newly extended area at tail of freelist
	 */
	/* init free region of newly extended area */
	fsi = n = newstblindex + newstblsize;
	f = &sp->slot[fsi];
	for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	/* append new free region at tail of old freelist */
	fsi = sp->header.freelist;
	if (fsi == -1)
		sp->header.freelist = n;
	else {
		do {
			f = &sp->slot[fsi];
			fsi = f->next;
		} while (fsi != -1);

		f->next = n;
	}

	sp->header.freecnt += sp->header.maxslot - n;

	/*
	 * insert the new entry
	 */
	dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);

	BT_MARK_DIRTY(pmp, ip);
	/*
	 * linelock any freeslots residing in old extent
	 */
	if (type == tlckEXTEND) {
		n = sp->header.maxslot >> 2;
		if (sp->header.freelist < n)
			dtLinelockFreelist(sp, n, &dtlck);
	}

	/*
	 *	update parent entry on the parent/root page
	 */
	/*
	 * acquire a transaction lock on the parent/root page
	 */
	tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
	dtlck = (struct dt_lock *) & tlck->lock;
	lv = & dtlck->lv[dtlck->index];

	/* linelock parent entry - 1st slot */
	lv->offset = 1;
	lv->length = 1;
	dtlck->index++;

	/* update the parent pxd for page extension */
	tpxd = (pxd_t *) & pp->slot[1];
	*tpxd = *pxd;

	DT_PUTPAGE(pmp);
	return 0;
}
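/*
 * note on the freelist handling in dtExtendPage() above (descriptive):
 * the old stbl area is recycled onto the head of the freelist and the
 * slots gained beyond the new stbl are appended at its tail, so freecnt
 * grows by oldstblsize plus the slots past the end of the new stbl.
 */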

/*
 *	dtSplitRoot()
 *
 * function:
 *	split the full root page into original/root/split page and new
 *	right page
 *	i.e., the root remains fixed in the tree anchor (inode) and its
 *	contents are copied to a single new right child page (since
 *	root page << non-root page), and the split root page then
 *	contains a single entry for the new right child page.
 *
 * parameter:
 *
 * return: 0 - success;
 *	   errno - failure;
 *	   return new page pinned;
 */
static int dtSplitRoot(tid_t tid,
	    struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
{
	struct super_block *sb = ip->i_sb;
	struct metapage *smp;
	dtroot_t *sp;
	struct metapage *rmp;
	dtpage_t *rp;
	s64 rbn;
	int xlen;
	int xsize;
	struct dtslot *f;
	s8 *stbl;
	int fsi, stblsize, n;
	struct idtentry *s;
	pxd_t *ppxd;
	struct pxdlist *pxdlist;
	pxd_t *pxd;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;
	int rc;

	/* get split root page */
	smp = split->mp;
	sp = &JFS_IP(ip)->i_dtroot;

	/*
	 * allocate/initialize a single (right) child page
	 *
	 * N.B. at the first split, one (or two) block(s) sufficient to fit
	 * the new entry is allocated; at subsequent splits, a full page is
	 * allocated;
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;
	rbn = addressPXD(pxd);
	xlen = lengthPXD(pxd);
	xsize = xlen << JFS_SBI(sb)->l2bsize;
	rmp = get_metapage(ip, rbn, xsize, 1);
	if (!rmp)
		return -EIO;

	rp = rmp->data;

	/* Allocate blocks to quota. */
	rc = dquot_alloc_block(ip, lengthPXD(pxd));
	if (rc) {
		release_metapage(rmp);
		return rc;
	}

	BT_MARK_DIRTY(rmp, ip);
	/*
	 * acquire a transaction lock on the new right page
	 */
	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
	dtlck = (struct dt_lock *) & tlck->lock;

	rp->header.flag =
	    (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
	rp->header.self = *pxd;

	/* initialize sibling pointers */
	rp->header.next = 0;
	rp->header.prev = 0;

	/*
	 *	move in-line root page into new right page extent
	 */
	/* linelock header + copied entries + new stbl (1st slot) in new page */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = 10;	/* 1 + 8 + 1 */
	dtlck->index++;

	n = xsize >> L2DTSLOTSIZE;
	rp->header.maxslot = n;
	stblsize = (n + 31) >> L2DTSLOTSIZE;

	/* copy old stbl to new stbl at start of extended area */
	rp->header.stblindex = DTROOTMAXSLOT;
	stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
	memcpy(stbl, sp->header.stbl, sp->header.nextindex);
	rp->header.nextindex = sp->header.nextindex;

	/* copy old data area to start of new data area */
	memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);

	/*
	 * append free region of newly extended area at tail of freelist
	 */
	/* init free region of newly extended area */
	fsi = n = DTROOTMAXSLOT + stblsize;
	f = &rp->slot[fsi];
	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	/* append new free region at tail of old freelist */
	fsi = sp->header.freelist;
	if (fsi == -1)
		rp->header.freelist = n;
	else {
		rp->header.freelist = fsi;

		do {
			f = &rp->slot[fsi];
			fsi = f->next;
		} while (fsi >= 0);

		f->next = n;
	}

	rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;

	/*
	 * Update directory index table for entries now in right page
	 */
	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
		s64 lblock;
		struct metapage *mp = NULL;
		struct ldtentry *ldtentry;

		stbl = DT_GETSTBL(rp);
		for (n = 0; n < rp->header.nextindex; n++) {
			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
				     rbn, n, &mp, &lblock);
		}
		if (mp)
			release_metapage(mp);
	}
	/*
	 * insert the new entry into the new right/child page
	 * (skip index in the new right page will not change)
	 */
	dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);

	/*
	 *	reset parent/root page
	 *
	 * set the 1st entry offset to 0, which forces the left-most key
	 * at any level of the tree to be less than any search key.
	 *
	 * The btree comparison code guarantees that the left-most key on any
	 * level of the tree is never used, so it doesn't need to be filled in.
	 */
	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the root page (in-memory inode)
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
	dtlck = (struct dt_lock *) & tlck->lock;

	/* linelock root */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = DTROOTMAXSLOT;
	dtlck->index++;

	/* update page header of root */
	if (sp->header.flag & BT_LEAF) {
		sp->header.flag &= ~BT_LEAF;
		sp->header.flag |= BT_INTERNAL;
	}

	/* init the first entry */
	s = (struct idtentry *) & sp->slot[DTENTRYSTART];
	ppxd = (pxd_t *) s;
	*ppxd = *pxd;
	s->next = -1;
	s->namlen = 0;

	stbl = sp->header.stbl;
	stbl[0] = DTENTRYSTART;
	sp->header.nextindex = 1;

	/* init freelist */
	fsi = DTENTRYSTART + 1;
	f = &sp->slot[fsi];

	/* init free region of remaining area */
	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	sp->header.freelist = DTENTRYSTART + 1;
	sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);

	*rmpp = rmp;

	return 0;
}


/*
 *	dtDelete()
 *
 * function: delete the entry(s) referenced by a key.
 *
 * parameter:
 *
 * return:
 */
int dtDelete(tid_t tid,
	 struct inode *ip, struct component_name * key, ino_t * ino, int flag)
{
	int rc = 0;
	s64 bn;
	struct metapage *mp, *imp;
	dtpage_t *p;
	int index;
	struct btstack btstack;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;
	int i;
	struct ldtentry *ldtentry;
	u8 *stbl;
	u32 table_index, next_index;
	struct metapage *nmp;
	dtpage_t *np;

	/*
	 *	search for the entry to delete:
	 *
	 * dtSearch() returns (leaf page pinned, index at which to delete).
	 */
	if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
		return rc;

	/* retrieve search result */
	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	/*
	 * We need to put the index of the next entry into the
	 * directory index table in order to resume a readdir from this
	 * entry.
	 */
	if (DO_INDEX(ip)) {
		stbl = DT_GETSTBL(p);
		ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
		table_index = le32_to_cpu(ldtentry->index);
		if (index == (p->header.nextindex - 1)) {
			/*
			 * Last entry in this leaf page
			 */
			if ((p->header.flag & BT_ROOT)
			    || (p->header.next == 0))
				next_index = -1;
			else {
				/* Read next leaf page */
				DT_GETPAGE(ip, le64_to_cpu(p->header.next),
					   nmp, PSIZE, np, rc);
				if (rc)
					next_index = -1;
				else {
					stbl = DT_GETSTBL(np);
					ldtentry =
					    (struct ldtentry *) & np->
					    slot[stbl[0]];
					next_index =
					    le32_to_cpu(ldtentry->index);
					DT_PUTPAGE(nmp);
				}
			}
		} else {
			ldtentry =
			    (struct ldtentry *) & p->slot[stbl[index + 1]];
			next_index = le32_to_cpu(ldtentry->index);
		}
		free_index(tid, ip, table_index, next_index);
	}
	/*
	 * the leaf page becomes empty, delete the page
	 */
	if (p->header.nextindex == 1) {
		/* delete empty page */
		rc = dtDeleteUp(tid, ip, mp, p, &btstack);
	}
	/*
	 * the leaf page has other entries remaining:
	 *
	 * delete the entry from the leaf page.
	 */
	else {
		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the leaf page
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
		dtlck = (struct dt_lock *) & tlck->lock;

		/*
		 * Do not assume that dtlck->index will be zero.  During a
		 * rename within a directory, this transaction may have
		 * modified this page already when adding the new entry.
		 */

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		/* linelock stbl of non-root leaf page */
		if (!(p->header.flag & BT_ROOT)) {
			if (dtlck->index >= dtlck->maxcnt)
				dtlck = (struct dt_lock *) txLinelock(dtlck);
			lv = & dtlck->lv[dtlck->index];
			i = index >> L2DTSLOTSIZE;
			lv->offset = p->header.stblindex + i;
			lv->length =
			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
			    i + 1;
			dtlck->index++;
		}

		/* free the leaf entry */
		dtDeleteEntry(p, index, &dtlck);

		/*
		 * Update directory index table for entries moved in stbl
		 */
		if (DO_INDEX(ip) && index < p->header.nextindex) {
			s64 lblock;

			imp = NULL;
			stbl = DT_GETSTBL(p);
			for (i = index; i < p->header.nextindex; i++) {
				ldtentry =
				    (struct ldtentry *) & p->slot[stbl[i]];
				modify_index(tid, ip,
					     le32_to_cpu(ldtentry->index),
					     bn, i, &imp, &lblock);
			}
			if (imp)
				release_metapage(imp);
		}

		DT_PUTPAGE(mp);
	}

	return rc;
}
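/*
 * note on the deletion path above (descriptive): dtDeleteEntry()
 * compacts the sorted entry table, so the directory index table
 * entries for every entry at or after the deleted slot are rewritten
 * to point at their new stbl positions via modify_index().
 */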
2299 * stop if the root page is reached (which is never deleted) or 2300 * if the entry deletion does not empty the page. 2301 */ 2302 while ((parent = BT_POP(btstack)) != NULL) { 2303 /* pin the parent page <sp> */ 2304 DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc); 2305 if (rc) 2306 return rc; 2307 2308 /* 2309 * free the extent of the child page deleted 2310 */ 2311 index = parent->index; 2312 2313 /* 2314 * delete the entry for the child page from parent 2315 */ 2316 nextindex = p->header.nextindex; 2317 2318 /* 2319 * the parent has the single entry being deleted: 2320 * 2321 * free the parent page which has become empty. 2322 */ 2323 if (nextindex == 1) { 2324 /* 2325 * keep the root internal page which has become empty 2326 */ 2327 if (p->header.flag & BT_ROOT) { 2328 /* 2329 * reset the root 2330 * 2331 * dtInitRoot() acquires txlock on the root 2332 */ 2333 dtInitRoot(tid, ip, PARENT(ip)); 2334 2335 DT_PUTPAGE(mp); 2336 2337 return 0; 2338 } 2339 /* 2340 * free the parent page 2341 */ 2342 else { 2343 /* 2344 * acquire a transaction lock on the page 2345 * 2346 * write FREEXTENT|NOREDOPAGE log record 2347 */ 2348 tlck = 2349 txMaplock(tid, ip, 2350 tlckDTREE | tlckFREE); 2351 pxdlock = (struct pxd_lock *) & tlck->lock; 2352 pxdlock->flag = mlckFREEPXD; 2353 pxdlock->pxd = p->header.self; 2354 pxdlock->index = 1; 2355 2356 /* update sibling pointers */ 2357 if ((rc = dtRelink(tid, ip, p))) { 2358 DT_PUTPAGE(mp); 2359 return rc; 2360 } 2361 2362 xlen = lengthPXD(&p->header.self); 2363 2364 /* Free quota allocation */ 2365 dquot_free_block(ip, xlen); 2366 2367 /* free/invalidate its buffer page */ 2368 discard_metapage(mp); 2369 2370 /* propagate up */ 2371 continue; 2372 } 2373 } 2374 2375 /* 2376 * the parent has other entries remaining: 2377 * 2378 * delete the router entry from the parent page. 2379 */ 2380 BT_MARK_DIRTY(mp, ip); 2381 /* 2382 * acquire a transaction lock on the page 2383 * 2384 * action: router entry deletion 2385 */ 2386 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); 2387 dtlck = (struct dt_lock *) & tlck->lock; 2388 2389 /* linelock header */ 2390 if (dtlck->index >= dtlck->maxcnt) 2391 dtlck = (struct dt_lock *) txLinelock(dtlck); 2392 lv = & dtlck->lv[dtlck->index]; 2393 lv->offset = 0; 2394 lv->length = 1; 2395 dtlck->index++; 2396 2397 /* linelock stbl of non-root leaf page */ 2398 if (!(p->header.flag & BT_ROOT)) { 2399 if (dtlck->index < dtlck->maxcnt) 2400 lv++; 2401 else { 2402 dtlck = (struct dt_lock *) txLinelock(dtlck); 2403 lv = & dtlck->lv[0]; 2404 } 2405 i = index >> L2DTSLOTSIZE; 2406 lv->offset = p->header.stblindex + i; 2407 lv->length = 2408 ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - 2409 i + 1; 2410 dtlck->index++; 2411 } 2412 2413 /* free the router entry */ 2414 dtDeleteEntry(p, index, &dtlck); 2415 2416 /* reset key of new leftmost entry of level (for consistency) */ 2417 if (index == 0 && 2418 ((p->header.flag & BT_ROOT) || p->header.prev == 0)) 2419 dtTruncateEntry(p, 0, &dtlck); 2420 2421 /* unpin the parent page */ 2422 DT_PUTPAGE(mp); 2423 2424 /* exit propagation up */ 2425 break; 2426 } 2427 2428 if (!DO_INDEX(ip)) 2429 ip->i_size -= PSIZE; 2430 2431 return 0; 2432 } 2433 2434 /* 2435 * dtRelink() 2436 * 2437 * function: 2438 * link around a freed page. 
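 *
 *	a minimal before/after sketch of the relink (describing the
 *	two updates below, not new behaviour):
 *
 *	before:  prev <-> p <-> next
 *	after:   prev <-------> next
 *
 *	i.e. the next page's header.prev is set to prevbn and the
 *	previous page's header.next to nextbn, each under its own
 *	tlckDTREE | tlckRELINK transaction lock.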
2439 * 2440 * parameter: 2441 * p: page to be freed 2442 * 2443 * return: 2444 */ 2445 static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p) 2446 { 2447 int rc; 2448 struct metapage *mp; 2449 s64 nextbn, prevbn; 2450 struct tlock *tlck; 2451 struct dt_lock *dtlck; 2452 struct lv *lv; 2453 2454 nextbn = le64_to_cpu(p->header.next); 2455 prevbn = le64_to_cpu(p->header.prev); 2456 2457 /* update prev pointer of the next page */ 2458 if (nextbn != 0) { 2459 DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); 2460 if (rc) 2461 return rc; 2462 2463 BT_MARK_DIRTY(mp, ip); 2464 /* 2465 * acquire a transaction lock on the next page 2466 * 2467 * action: update prev pointer; 2468 */ 2469 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK); 2470 jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p", 2471 tlck, ip, mp); 2472 dtlck = (struct dt_lock *) & tlck->lock; 2473 2474 /* linelock header */ 2475 if (dtlck->index >= dtlck->maxcnt) 2476 dtlck = (struct dt_lock *) txLinelock(dtlck); 2477 lv = & dtlck->lv[dtlck->index]; 2478 lv->offset = 0; 2479 lv->length = 1; 2480 dtlck->index++; 2481 2482 p->header.prev = cpu_to_le64(prevbn); 2483 DT_PUTPAGE(mp); 2484 } 2485 2486 /* update next pointer of the previous page */ 2487 if (prevbn != 0) { 2488 DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc); 2489 if (rc) 2490 return rc; 2491 2492 BT_MARK_DIRTY(mp, ip); 2493 /* 2494 * acquire a transaction lock on the prev page 2495 * 2496 * action: update next pointer; 2497 */ 2498 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK); 2499 jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p", 2500 tlck, ip, mp); 2501 dtlck = (struct dt_lock *) & tlck->lock; 2502 2503 /* linelock header */ 2504 if (dtlck->index >= dtlck->maxcnt) 2505 dtlck = (struct dt_lock *) txLinelock(dtlck); 2506 lv = & dtlck->lv[dtlck->index]; 2507 lv->offset = 0; 2508 lv->length = 1; 2509 dtlck->index++; 2510 2511 p->header.next = cpu_to_le64(nextbn); 2512 DT_PUTPAGE(mp); 2513 } 2514 2515 return 0; 2516 } 2517 2518 2519 /* 2520 * dtInitRoot() 2521 * 2522 * initialize directory root (inline in inode) 2523 */ 2524 void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot) 2525 { 2526 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 2527 dtroot_t *p; 2528 int fsi; 2529 struct dtslot *f; 2530 struct tlock *tlck; 2531 struct dt_lock *dtlck; 2532 struct lv *lv; 2533 u16 xflag_save; 2534 2535 /* 2536 * If this was previously a non-empty directory, we need to remove 2537 * the old directory table. 2538 */ 2539 if (DO_INDEX(ip)) { 2540 if (!jfs_dirtable_inline(ip)) { 2541 struct tblock *tblk = tid_to_tblock(tid); 2542 /* 2543 * We're playing games with the tid's xflag. If 2544 * we're removing a regular file, the file's xtree 2545 * is committed with COMMIT_PMAP, but we always 2546 * commit the directory's xtree with COMMIT_PWMAP. 2547 */ 2548 xflag_save = tblk->xflag; 2549 tblk->xflag = 0; 2550 /* 2551 * xtTruncate isn't guaranteed to fully truncate 2552 * the xtree. The caller needs to check i_size 2553 * after committing the transaction to see if 2554 * additional truncation is needed. The 2555 * COMMIT_Stale flag tells the caller that we 2556 * initiated the truncation.
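 *
 * (a sketch of the check a caller is expected to make after
 * txCommit(), roughly as the directory-removal paths do; "dip"
 * is the caller's directory inode, an assumed name:
 *
 *	if (test_cflag(COMMIT_Stale, dip)) {
 *		if (dip->i_size > 1)
 *			jfs_truncate_nolock(dip, 0);
 *		clear_cflag(COMMIT_Stale, dip);
 *	}
 * )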
2557 */ 2558 xtTruncate(tid, ip, 0, COMMIT_PWMAP); 2559 set_cflag(COMMIT_Stale, ip); 2560 2561 tblk->xflag = xflag_save; 2562 } else 2563 ip->i_size = 1; 2564 2565 jfs_ip->next_index = 2; 2566 } else 2567 ip->i_size = IDATASIZE; 2568 2569 /* 2570 * acquire a transaction lock on the root 2571 * 2572 * action: directory initialization; 2573 */ 2574 tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag, 2575 tlckDTREE | tlckENTRY | tlckBTROOT); 2576 dtlck = (struct dt_lock *) & tlck->lock; 2577 2578 /* linelock root */ 2579 ASSERT(dtlck->index == 0); 2580 lv = & dtlck->lv[0]; 2581 lv->offset = 0; 2582 lv->length = DTROOTMAXSLOT; 2583 dtlck->index++; 2584 2585 p = &jfs_ip->i_dtroot; 2586 2587 p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF; 2588 2589 p->header.nextindex = 0; 2590 2591 /* init freelist */ 2592 fsi = 1; 2593 f = &p->slot[fsi]; 2594 2595 /* init data area of root */ 2596 for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++) 2597 f->next = fsi; 2598 f->next = -1; 2599 2600 p->header.freelist = 1; 2601 p->header.freecnt = 8; 2602 2603 /* init '..' entry */ 2604 p->header.idotdot = cpu_to_le32(idotdot); 2605 2606 return; 2607 } 2608 2609 /* 2610 * add_missing_indices() 2611 * 2612 * function: Fix dtree page in which one or more entries has an invalid index. 2613 * fsck.jfs should really fix this, but it currently does not. 2614 * Called from jfs_readdir when bad index is detected. 2615 */ 2616 static void add_missing_indices(struct inode *inode, s64 bn) 2617 { 2618 struct ldtentry *d; 2619 struct dt_lock *dtlck; 2620 int i; 2621 uint index; 2622 struct lv *lv; 2623 struct metapage *mp; 2624 dtpage_t *p; 2625 int rc; 2626 s8 *stbl; 2627 tid_t tid; 2628 struct tlock *tlck; 2629 2630 tid = txBegin(inode->i_sb, 0); 2631 2632 DT_GETPAGE(inode, bn, mp, PSIZE, p, rc); 2633 2634 if (rc) { 2635 printk(KERN_ERR "DT_GETPAGE failed!\n"); 2636 goto end; 2637 } 2638 BT_MARK_DIRTY(mp, inode); 2639 2640 ASSERT(p->header.flag & BT_LEAF); 2641 2642 tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY); 2643 if (BT_IS_ROOT(mp)) 2644 tlck->type |= tlckBTROOT; 2645 2646 dtlck = (struct dt_lock *) &tlck->lock; 2647 2648 stbl = DT_GETSTBL(p); 2649 for (i = 0; i < p->header.nextindex; i++) { 2650 d = (struct ldtentry *) &p->slot[stbl[i]]; 2651 index = le32_to_cpu(d->index); 2652 if ((index < 2) || (index >= JFS_IP(inode)->next_index)) { 2653 d->index = cpu_to_le32(add_index(tid, inode, bn, i)); 2654 if (dtlck->index >= dtlck->maxcnt) 2655 dtlck = (struct dt_lock *) txLinelock(dtlck); 2656 lv = &dtlck->lv[dtlck->index]; 2657 lv->offset = stbl[i]; 2658 lv->length = 1; 2659 dtlck->index++; 2660 } 2661 } 2662 2663 DT_PUTPAGE(mp); 2664 (void) txCommit(tid, 1, &inode, 0); 2665 end: 2666 txEnd(tid); 2667 } 2668 2669 /* 2670 * Buffer to hold directory entry info while traversing a dtree page 2671 * before being fed to the filldir function 2672 */ 2673 struct jfs_dirent { 2674 loff_t position; 2675 int ino; 2676 u16 name_len; 2677 char name[]; 2678 }; 2679 2680 /* 2681 * function to determine next variable-sized jfs_dirent in buffer 2682 */ 2683 static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent) 2684 { 2685 return (struct jfs_dirent *) 2686 ((char *)dirent + 2687 ((sizeof (struct jfs_dirent) + dirent->name_len + 1 + 2688 sizeof (loff_t) - 1) & 2689 ~(sizeof (loff_t) - 1))); 2690 } 2691 2692 /* 2693 * jfs_readdir() 2694 * 2695 * function: read directory entries sequentially 2696 * from the specified entry offset 2697 * 2698 * parameter: 2699 * 2700 * return: offset = (pn, index) of start entry 
2701 * of next jfs_readdir()/dtRead() 2702 */ 2703 int jfs_readdir(struct file *file, struct dir_context *ctx) 2704 { 2705 struct inode *ip = file_inode(file); 2706 struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab; 2707 int rc = 0; 2708 loff_t dtpos; /* legacy OS/2 style position */ 2709 struct dtoffset { 2710 s16 pn; 2711 s16 index; 2712 s32 unused; 2713 } *dtoffset = (struct dtoffset *) &dtpos; 2714 s64 bn; 2715 struct metapage *mp; 2716 dtpage_t *p; 2717 int index; 2718 s8 *stbl; 2719 struct btstack btstack; 2720 int i, next; 2721 struct ldtentry *d; 2722 struct dtslot *t; 2723 int d_namleft, len, outlen; 2724 unsigned long dirent_buf; 2725 char *name_ptr; 2726 u32 dir_index; 2727 int do_index = 0; 2728 uint loop_count = 0; 2729 struct jfs_dirent *jfs_dirent; 2730 int jfs_dirents; 2731 int overflow, fix_page, page_fixed = 0; 2732 static int unique_pos = 2; /* If we can't fix broken index */ 2733 2734 if (ctx->pos == DIREND) 2735 return 0; 2736 2737 if (DO_INDEX(ip)) { 2738 /* 2739 * persistent index is stored in directory entries. 2740 * Special cases: 0 = . 2741 * 1 = .. 2742 * -1 = End of directory 2743 */ 2744 do_index = 1; 2745 2746 dir_index = (u32) ctx->pos; 2747 2748 /* 2749 * NFSv4 reserves cookies 1 and 2 for . and .. so the value 2750 * we return to the vfs is one greater than the one we use 2751 * internally. 2752 */ 2753 if (dir_index) 2754 dir_index--; 2755 2756 if (dir_index > 1) { 2757 struct dir_table_slot dirtab_slot; 2758 2759 if (dtEmpty(ip) || 2760 (dir_index >= JFS_IP(ip)->next_index)) { 2761 /* Stale position. Directory has shrunk */ 2762 ctx->pos = DIREND; 2763 return 0; 2764 } 2765 repeat: 2766 rc = read_index(ip, dir_index, &dirtab_slot); 2767 if (rc) { 2768 ctx->pos = DIREND; 2769 return rc; 2770 } 2771 if (dirtab_slot.flag == DIR_INDEX_FREE) { 2772 if (loop_count++ > JFS_IP(ip)->next_index) { 2773 jfs_err("jfs_readdir detected infinite loop!"); 2774 ctx->pos = DIREND; 2775 return 0; 2776 } 2777 dir_index = le32_to_cpu(dirtab_slot.addr2); 2778 if (dir_index == -1) { 2779 ctx->pos = DIREND; 2780 return 0; 2781 } 2782 goto repeat; 2783 } 2784 bn = addressDTS(&dirtab_slot); 2785 index = dirtab_slot.slot; 2786 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 2787 if (rc) { 2788 ctx->pos = DIREND; 2789 return 0; 2790 } 2791 if (p->header.flag & BT_INTERNAL) { 2792 jfs_err("jfs_readdir: bad index table"); 2793 DT_PUTPAGE(mp); 2794 ctx->pos = DIREND; 2795 return 0; 2796 } 2797 } else { 2798 if (dir_index == 0) { 2799 /* 2800 * self "." 2801 */ 2802 ctx->pos = 1; 2803 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 2804 return 0; 2805 } 2806 /* 2807 * parent ".." 2808 */ 2809 ctx->pos = 2; 2810 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 2811 return 0; 2812 2813 /* 2814 * Find first entry of left-most leaf 2815 */ 2816 if (dtEmpty(ip)) { 2817 ctx->pos = DIREND; 2818 return 0; 2819 } 2820 2821 if ((rc = dtReadFirst(ip, &btstack))) 2822 return rc; 2823 2824 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); 2825 } 2826 } else { 2827 /* 2828 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 2829 * 2830 * pn = 0; index = 1: First entry "." 2831 * pn = 0; index = 2: Second entry ".." 2832 * pn > 0: Real entries, pn=1 -> leftmost page 2833 * pn = index = -1: No more entries 2834 */ 2835 dtpos = ctx->pos; 2836 if (dtpos < 2) { 2837 /* build "." entry */ 2838 ctx->pos = 1; 2839 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 2840 return 0; 2841 dtoffset->index = 2; 2842 ctx->pos = dtpos; 2843 } 2844 2845 if (dtoffset->pn == 0) { 2846 if (dtoffset->index == 2) { 2847 /* build ".." 
entry */ 2848 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 2849 return 0; 2850 } else { 2851 jfs_err("jfs_readdir called with invalid offset!"); 2852 } 2853 dtoffset->pn = 1; 2854 dtoffset->index = 0; 2855 ctx->pos = dtpos; 2856 } 2857 2858 if (dtEmpty(ip)) { 2859 ctx->pos = DIREND; 2860 return 0; 2861 } 2862 2863 if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) { 2864 jfs_err("jfs_readdir: unexpected rc = %d from dtReadNext", 2865 rc); 2866 ctx->pos = DIREND; 2867 return 0; 2868 } 2869 /* get start leaf page and index */ 2870 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); 2871 2872 /* offset beyond directory eof ? */ 2873 if (bn < 0) { 2874 ctx->pos = DIREND; 2875 return 0; 2876 } 2877 } 2878 2879 dirent_buf = __get_free_page(GFP_KERNEL); 2880 if (dirent_buf == 0) { 2881 DT_PUTPAGE(mp); 2882 jfs_warn("jfs_readdir: __get_free_page failed!"); 2883 ctx->pos = DIREND; 2884 return -ENOMEM; 2885 } 2886 2887 while (1) { 2888 jfs_dirent = (struct jfs_dirent *) dirent_buf; 2889 jfs_dirents = 0; 2890 overflow = fix_page = 0; 2891 2892 stbl = DT_GETSTBL(p); 2893 2894 for (i = index; i < p->header.nextindex; i++) { 2895 if (stbl[i] < 0 || stbl[i] > 127) { 2896 jfs_err("JFS: Invalid stbl[%d] = %d for inode %ld, block = %lld", 2897 i, stbl[i], (long)ip->i_ino, (long long)bn); 2898 free_page(dirent_buf); 2899 DT_PUTPAGE(mp); 2900 return -EIO; 2901 } 2902 2903 d = (struct ldtentry *) & p->slot[stbl[i]]; 2904 2905 if (((long) jfs_dirent + d->namlen + 1) > 2906 (dirent_buf + PAGE_SIZE)) { 2907 /* DBCS codepages could overrun dirent_buf */ 2908 index = i; 2909 overflow = 1; 2910 break; 2911 } 2912 2913 d_namleft = d->namlen; 2914 name_ptr = jfs_dirent->name; 2915 jfs_dirent->ino = le32_to_cpu(d->inumber); 2916 2917 if (do_index) { 2918 len = min(d_namleft, DTLHDRDATALEN); 2919 jfs_dirent->position = le32_to_cpu(d->index); 2920 /* 2921 * d->index should always be valid, but it 2922 * isn't. fsck.jfs doesn't create the 2923 * directory index for the lost+found 2924 * directory. Rather than let it go, 2925 * we can try to fix it. 2926 */ 2927 if ((jfs_dirent->position < 2) || 2928 (jfs_dirent->position >= 2929 JFS_IP(ip)->next_index)) { 2930 if (!page_fixed && !isReadOnly(ip)) { 2931 fix_page = 1; 2932 /* 2933 * setting overflow and setting 2934 * index to i will cause the 2935 * same page to be processed 2936 * again starting here 2937 */ 2938 overflow = 1; 2939 index = i; 2940 break; 2941 } 2942 jfs_dirent->position = unique_pos++; 2943 } 2944 /* 2945 * We add 1 to the index because we may 2946 * use a value of 2 internally, and NFSv4 2947 * doesn't like that. 
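 *
 * (restating the cookie mapping used here and at the top of
 * jfs_readdir(): cookie 1 is ".", cookie 2 is "..", and an
 * on-disk d->index of n (n >= 2) is returned as cookie n + 1;
 * the "dir_index--" near the top of this function undoes the
 * shift on the next call.)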
2948 */ 2949 jfs_dirent->position++; 2950 } else { 2951 jfs_dirent->position = dtpos; 2952 len = min(d_namleft, DTLHDRDATALEN_LEGACY); 2953 } 2954 2955 /* copy the name of head/only segment */ 2956 outlen = jfs_strfromUCS_le(name_ptr, d->name, len, 2957 codepage); 2958 jfs_dirent->name_len = outlen; 2959 2960 /* copy name in the additional segment(s) */ 2961 next = d->next; 2962 while (next >= 0) { 2963 t = (struct dtslot *) & p->slot[next]; 2964 name_ptr += outlen; 2965 d_namleft -= len; 2966 /* Sanity Check */ 2967 if (d_namleft == 0) { 2968 jfs_error(ip->i_sb, 2969 "JFS:Dtree error: ino = %ld, bn=%lld, index = %d\n", 2970 (long)ip->i_ino, 2971 (long long)bn, 2972 i); 2973 goto skip_one; 2974 } 2975 len = min(d_namleft, DTSLOTDATALEN); 2976 outlen = jfs_strfromUCS_le(name_ptr, t->name, 2977 len, codepage); 2978 jfs_dirent->name_len += outlen; 2979 2980 next = t->next; 2981 } 2982 2983 jfs_dirents++; 2984 jfs_dirent = next_jfs_dirent(jfs_dirent); 2985 skip_one: 2986 if (!do_index) 2987 dtoffset->index++; 2988 } 2989 2990 if (!overflow) { 2991 /* Point to next leaf page */ 2992 if (p->header.flag & BT_ROOT) 2993 bn = 0; 2994 else { 2995 bn = le64_to_cpu(p->header.next); 2996 index = 0; 2997 /* update offset (pn:index) for new page */ 2998 if (!do_index) { 2999 dtoffset->pn++; 3000 dtoffset->index = 0; 3001 } 3002 } 3003 page_fixed = 0; 3004 } 3005 3006 /* unpin previous leaf page */ 3007 DT_PUTPAGE(mp); 3008 3009 jfs_dirent = (struct jfs_dirent *) dirent_buf; 3010 while (jfs_dirents--) { 3011 ctx->pos = jfs_dirent->position; 3012 if (!dir_emit(ctx, jfs_dirent->name, 3013 jfs_dirent->name_len, 3014 jfs_dirent->ino, DT_UNKNOWN)) 3015 goto out; 3016 jfs_dirent = next_jfs_dirent(jfs_dirent); 3017 } 3018 3019 if (fix_page) { 3020 add_missing_indices(ip, bn); 3021 page_fixed = 1; 3022 } 3023 3024 if (!overflow && (bn == 0)) { 3025 ctx->pos = DIREND; 3026 break; 3027 } 3028 3029 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 3030 if (rc) { 3031 free_page(dirent_buf); 3032 return rc; 3033 } 3034 } 3035 3036 out: 3037 free_page(dirent_buf); 3038 3039 return rc; 3040 } 3041 3042 3043 /* 3044 * dtReadFirst() 3045 * 3046 * function: get the leftmost page of the directory 3047 */ 3048 static int dtReadFirst(struct inode *ip, struct btstack * btstack) 3049 { 3050 int rc = 0; 3051 s64 bn; 3052 int psize = 288; /* initial in-line directory */ 3053 struct metapage *mp; 3054 dtpage_t *p; 3055 s8 *stbl; 3056 struct btframe *btsp; 3057 pxd_t *xd; 3058 3059 BT_CLR(btstack); /* reset stack */ 3060 3061 /* 3062 * descend leftmost path of the tree 3063 * 3064 * by convention, root bn = 0. 
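 *
 * (the first DT_GETPAGE() below reads the 288-byte in-inode root,
 * i.e. 9 slots of 32 bytes, matching the header slot plus the 8
 * free slots dtInitRoot() puts on the freelist; on later
 * iterations psize is recomputed from the child's extent
 * descriptor, so non-root pages are read at their real extent
 * size.)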
3065 */ 3066 for (bn = 0;;) { 3067 DT_GETPAGE(ip, bn, mp, psize, p, rc); 3068 if (rc) 3069 return rc; 3070 3071 /* 3072 * leftmost leaf page 3073 */ 3074 if (p->header.flag & BT_LEAF) { 3075 /* return leftmost entry */ 3076 btsp = btstack->top; 3077 btsp->bn = bn; 3078 btsp->index = 0; 3079 btsp->mp = mp; 3080 3081 return 0; 3082 } 3083 3084 /* 3085 * descend down to leftmost child page 3086 */ 3087 if (BT_STACK_FULL(btstack)) { 3088 DT_PUTPAGE(mp); 3089 jfs_error(ip->i_sb, "btstack overrun\n"); 3090 BT_STACK_DUMP(btstack); 3091 return -EIO; 3092 } 3093 /* push (bn, index) of the parent page/entry */ 3094 BT_PUSH(btstack, bn, 0); 3095 3096 /* get the leftmost entry */ 3097 stbl = DT_GETSTBL(p); 3098 3099 if (stbl[0] < 0 || stbl[0] > 127) { 3100 DT_PUTPAGE(mp); 3101 jfs_error(ip->i_sb, "stbl[0] out of bound\n"); 3102 return -EIO; 3103 } 3104 3105 xd = (pxd_t *) & p->slot[stbl[0]]; 3106 3107 /* get the child page block address */ 3108 bn = addressPXD(xd); 3109 psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize; 3110 3111 /* unpin the parent page */ 3112 DT_PUTPAGE(mp); 3113 } 3114 } 3115 3116 3117 /* 3118 * dtReadNext() 3119 * 3120 * function: get the page of the specified offset (pn:index) 3121 * 3122 * return: if (offset > eof), bn = -1; 3123 * 3124 * note: if index > nextindex of the target leaf page, 3125 * start with 1st entry of next leaf page; 3126 */ 3127 static int dtReadNext(struct inode *ip, loff_t * offset, 3128 struct btstack * btstack) 3129 { 3130 int rc = 0; 3131 struct dtoffset { 3132 s16 pn; 3133 s16 index; 3134 s32 unused; 3135 } *dtoffset = (struct dtoffset *) offset; 3136 s64 bn; 3137 struct metapage *mp; 3138 dtpage_t *p; 3139 int index; 3140 int pn; 3141 s8 *stbl; 3142 struct btframe *btsp, *parent; 3143 pxd_t *xd; 3144 3145 /* 3146 * get leftmost leaf page pinned 3147 */ 3148 if ((rc = dtReadFirst(ip, btstack))) 3149 return rc; 3150 3151 /* get leaf page */ 3152 DT_GETSEARCH(ip, btstack->top, bn, mp, p, index); 3153 3154 /* get the start offset (pn:index) */ 3155 pn = dtoffset->pn - 1; /* Now pn = 0 represents leftmost leaf */ 3156 index = dtoffset->index; 3157 3158 /* start at leftmost page ? */ 3159 if (pn == 0) { 3160 /* offset beyond eof ? */ 3161 if (index < p->header.nextindex) 3162 goto out; 3163 3164 if (p->header.flag & BT_ROOT) { 3165 bn = -1; 3166 goto out; 3167 } 3168 3169 /* start with 1st entry of next leaf page */ 3170 dtoffset->pn++; 3171 dtoffset->index = index = 0; 3172 goto a; 3173 } 3174 3175 /* start at non-leftmost page: scan parent pages for large pn */ 3176 if (p->header.flag & BT_ROOT) { 3177 bn = -1; 3178 goto out; 3179 } 3180 3181 /* start after next leaf page ? */ 3182 if (pn > 1) 3183 goto b; 3184 3185 /* get leaf page pn = 1 */ 3186 a: 3187 bn = le64_to_cpu(p->header.next); 3188 3189 /* unpin leaf page */ 3190 DT_PUTPAGE(mp); 3191 3192 /* offset beyond eof ? 
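 * (a next-page address of 0 marks the last leaf, so bn == 0 here
 * means the requested pn lies past the end of the directory)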
*/ 3193 if (bn == 0) { 3194 bn = -1; 3195 goto out; 3196 } 3197 3198 goto c; 3199 3200 /* 3201 * scan last internal page level to get target leaf page 3202 */ 3203 b: 3204 /* unpin leftmost leaf page */ 3205 DT_PUTPAGE(mp); 3206 3207 /* get left most parent page */ 3208 btsp = btstack->top; 3209 parent = btsp - 1; 3210 bn = parent->bn; 3211 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 3212 if (rc) 3213 return rc; 3214 3215 /* scan parent pages at last internal page level */ 3216 while (pn >= p->header.nextindex) { 3217 pn -= p->header.nextindex; 3218 3219 /* get next parent page address */ 3220 bn = le64_to_cpu(p->header.next); 3221 3222 /* unpin current parent page */ 3223 DT_PUTPAGE(mp); 3224 3225 /* offset beyond eof ? */ 3226 if (bn == 0) { 3227 bn = -1; 3228 goto out; 3229 } 3230 3231 /* get next parent page */ 3232 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 3233 if (rc) 3234 return rc; 3235 3236 /* update parent page stack frame */ 3237 parent->bn = bn; 3238 } 3239 3240 /* get leaf page address */ 3241 stbl = DT_GETSTBL(p); 3242 xd = (pxd_t *) & p->slot[stbl[pn]]; 3243 bn = addressPXD(xd); 3244 3245 /* unpin parent page */ 3246 DT_PUTPAGE(mp); 3247 3248 /* 3249 * get target leaf page 3250 */ 3251 c: 3252 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 3253 if (rc) 3254 return rc; 3255 3256 /* 3257 * leaf page has been completed: 3258 * start with 1st entry of next leaf page 3259 */ 3260 if (index >= p->header.nextindex) { 3261 bn = le64_to_cpu(p->header.next); 3262 3263 /* unpin leaf page */ 3264 DT_PUTPAGE(mp); 3265 3266 /* offset beyond eof ? */ 3267 if (bn == 0) { 3268 bn = -1; 3269 goto out; 3270 } 3271 3272 /* get next leaf page */ 3273 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); 3274 if (rc) 3275 return rc; 3276 3277 /* start with 1st entry of next leaf page */ 3278 dtoffset->pn++; 3279 dtoffset->index = 0; 3280 } 3281 3282 out: 3283 /* return target leaf page pinned */ 3284 btsp = btstack->top; 3285 btsp->bn = bn; 3286 btsp->index = dtoffset->index; 3287 btsp->mp = mp; 3288 3289 return 0; 3290 } 3291 3292 3293 /* 3294 * dtCompare() 3295 * 3296 * function: compare search key with an internal entry 3297 * 3298 * return: 3299 * < 0 if k is < record 3300 * = 0 if k is = record 3301 * > 0 if k is > record 3302 */ 3303 static int dtCompare(struct component_name * key, /* search key */ 3304 dtpage_t * p, /* directory page */ 3305 int si) 3306 { /* entry slot index */ 3307 wchar_t *kname; 3308 __le16 *name; 3309 int klen, namlen, len, rc; 3310 struct idtentry *ih; 3311 struct dtslot *t; 3312 3313 /* 3314 * force the left-most key on internal pages, at any level of 3315 * the tree, to be less than any search key. 3316 * this obviates having to update the leftmost key on an internal 3317 * page when the user inserts a new key in the tree smaller than 3318 * anything that has been stored. 3319 * 3320 * (? if/when dtSearch() narrows down to 1st entry (index = 0), 3321 * at any internal page at any level of the tree, 3322 * it descends to child of the entry anyway - 3323 * ? 
make the entry as min size dummy entry) 3324 * 3325 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF)) 3326 * return (1); 3327 */ 3328 3329 kname = key->name; 3330 klen = key->namlen; 3331 3332 ih = (struct idtentry *) & p->slot[si]; 3333 si = ih->next; 3334 name = ih->name; 3335 namlen = ih->namlen; 3336 len = min(namlen, DTIHDRDATALEN); 3337 3338 /* compare with head/only segment */ 3339 len = min(klen, len); 3340 if ((rc = UniStrncmp_le(kname, name, len))) 3341 return rc; 3342 3343 klen -= len; 3344 namlen -= len; 3345 3346 /* compare with additional segment(s) */ 3347 kname += len; 3348 while (klen > 0 && namlen > 0) { 3349 /* compare with next name segment */ 3350 t = (struct dtslot *) & p->slot[si]; 3351 len = min(namlen, DTSLOTDATALEN); 3352 len = min(klen, len); 3353 name = t->name; 3354 if ((rc = UniStrncmp_le(kname, name, len))) 3355 return rc; 3356 3357 klen -= len; 3358 namlen -= len; 3359 kname += len; 3360 si = t->next; 3361 } 3362 3363 return (klen - namlen); 3364 } 3365 3366 3367 3368 3369 /* 3370 * ciCompare() 3371 * 3372 * function: compare search key with an (leaf/internal) entry 3373 * 3374 * return: 3375 * < 0 if k is < record 3376 * = 0 if k is = record 3377 * > 0 if k is > record 3378 */ 3379 static int ciCompare(struct component_name * key, /* search key */ 3380 dtpage_t * p, /* directory page */ 3381 int si, /* entry slot index */ 3382 int flag) 3383 { 3384 wchar_t *kname, x; 3385 __le16 *name; 3386 int klen, namlen, len, rc; 3387 struct ldtentry *lh; 3388 struct idtentry *ih; 3389 struct dtslot *t; 3390 int i; 3391 3392 /* 3393 * force the left-most key on internal pages, at any level of 3394 * the tree, to be less than any search key. 3395 * this obviates having to update the leftmost key on an internal 3396 * page when the user inserts a new key in the tree smaller than 3397 * anything that has been stored. 3398 * 3399 * (? if/when dtSearch() narrows down to 1st entry (index = 0), 3400 * at any internal page at any level of the tree, 3401 * it descends to child of the entry anyway - 3402 * ? 
make the entry as min size dummy entry) 3403 * 3404 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF)) 3405 * return (1); 3406 */ 3407 3408 kname = key->name; 3409 klen = key->namlen; 3410 3411 /* 3412 * leaf page entry 3413 */ 3414 if (p->header.flag & BT_LEAF) { 3415 lh = (struct ldtentry *) & p->slot[si]; 3416 si = lh->next; 3417 name = lh->name; 3418 namlen = lh->namlen; 3419 if (flag & JFS_DIR_INDEX) 3420 len = min(namlen, DTLHDRDATALEN); 3421 else 3422 len = min(namlen, DTLHDRDATALEN_LEGACY); 3423 } 3424 /* 3425 * internal page entry 3426 */ 3427 else { 3428 ih = (struct idtentry *) & p->slot[si]; 3429 si = ih->next; 3430 name = ih->name; 3431 namlen = ih->namlen; 3432 len = min(namlen, DTIHDRDATALEN); 3433 } 3434 3435 /* compare with head/only segment */ 3436 len = min(klen, len); 3437 for (i = 0; i < len; i++, kname++, name++) { 3438 /* only uppercase if case-insensitive support is on */ 3439 if ((flag & JFS_OS2) == JFS_OS2) 3440 x = UniToupper(le16_to_cpu(*name)); 3441 else 3442 x = le16_to_cpu(*name); 3443 if ((rc = *kname - x)) 3444 return rc; 3445 } 3446 3447 klen -= len; 3448 namlen -= len; 3449 3450 /* compare with additional segment(s) */ 3451 while (klen > 0 && namlen > 0) { 3452 /* compare with next name segment */ 3453 t = (struct dtslot *) & p->slot[si]; 3454 len = min(namlen, DTSLOTDATALEN); 3455 len = min(klen, len); 3456 name = t->name; 3457 for (i = 0; i < len; i++, kname++, name++) { 3458 /* only uppercase if case-insensitive support is on */ 3459 if ((flag & JFS_OS2) == JFS_OS2) 3460 x = UniToupper(le16_to_cpu(*name)); 3461 else 3462 x = le16_to_cpu(*name); 3463 3464 if ((rc = *kname - x)) 3465 return rc; 3466 } 3467 3468 klen -= len; 3469 namlen -= len; 3470 si = t->next; 3471 } 3472 3473 return (klen - namlen); 3474 } 3475 3476 3477 /* 3478 * ciGetLeafPrefixKey() 3479 * 3480 * function: compute prefix of suffix compression 3481 * from two adjacent leaf entries 3482 * across page boundary 3483 * 3484 * return: non-zero on error 3485 * 3486 */ 3487 static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp, 3488 int ri, struct component_name * key, int flag) 3489 { 3490 int klen, namlen; 3491 wchar_t *pl, *pr, *kname; 3492 struct component_name lkey; 3493 struct component_name rkey; 3494 3495 lkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t), 3496 GFP_KERNEL); 3497 if (lkey.name == NULL) 3498 return -ENOMEM; 3499 3500 rkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t), 3501 GFP_KERNEL); 3502 if (rkey.name == NULL) { 3503 kfree(lkey.name); 3504 return -ENOMEM; 3505 } 3506 3507 /* get left and right key */ 3508 dtGetKey(lp, li, &lkey, flag); 3509 lkey.name[lkey.namlen] = 0; 3510 3511 if ((flag & JFS_OS2) == JFS_OS2) 3512 ciToUpper(&lkey); 3513 3514 dtGetKey(rp, ri, &rkey, flag); 3515 rkey.name[rkey.namlen] = 0; 3516 3517 3518 if ((flag & JFS_OS2) == JFS_OS2) 3519 ciToUpper(&rkey); 3520 3521 /* compute prefix */ 3522 klen = 0; 3523 kname = key->name; 3524 namlen = min(lkey.namlen, rkey.namlen); 3525 for (pl = lkey.name, pr = rkey.name; 3526 namlen; pl++, pr++, namlen--, klen++, kname++) { 3527 *kname = *pr; 3528 if (*pl != *pr) { 3529 key->namlen = klen + 1; 3530 goto free_names; 3531 } 3532 } 3533 3534 /* l->namlen <= r->namlen since l <= r */ 3535 if (lkey.namlen < rkey.namlen) { 3536 *kname = *pr; 3537 key->namlen = klen + 1; 3538 } else /* l->namelen == r->namelen */ 3539 key->namlen = klen; 3540 3541 free_names: 3542 kfree(lkey.name); 3543 kfree(rkey.name); 3544 return 0; 3545 } 3546 3547 3548 3549 /* 3550 * 
dtGetKey() 3551 * 3552 * function: get key of the entry 3553 */ 3554 static void dtGetKey(dtpage_t * p, int i, /* entry index */ 3555 struct component_name * key, int flag) 3556 { 3557 int si; 3558 s8 *stbl; 3559 struct ldtentry *lh; 3560 struct idtentry *ih; 3561 struct dtslot *t; 3562 int namlen, len; 3563 wchar_t *kname; 3564 __le16 *name; 3565 3566 /* get entry */ 3567 stbl = DT_GETSTBL(p); 3568 si = stbl[i]; 3569 if (p->header.flag & BT_LEAF) { 3570 lh = (struct ldtentry *) & p->slot[si]; 3571 si = lh->next; 3572 namlen = lh->namlen; 3573 name = lh->name; 3574 if (flag & JFS_DIR_INDEX) 3575 len = min(namlen, DTLHDRDATALEN); 3576 else 3577 len = min(namlen, DTLHDRDATALEN_LEGACY); 3578 } else { 3579 ih = (struct idtentry *) & p->slot[si]; 3580 si = ih->next; 3581 namlen = ih->namlen; 3582 name = ih->name; 3583 len = min(namlen, DTIHDRDATALEN); 3584 } 3585 3586 key->namlen = namlen; 3587 kname = key->name; 3588 3589 /* 3590 * move head/only segment 3591 */ 3592 UniStrncpy_from_le(kname, name, len); 3593 3594 /* 3595 * move additional segment(s) 3596 */ 3597 while (si >= 0) { 3598 /* get next segment */ 3599 t = &p->slot[si]; 3600 kname += len; 3601 namlen -= len; 3602 len = min(namlen, DTSLOTDATALEN); 3603 UniStrncpy_from_le(kname, t->name, len); 3604 3605 si = t->next; 3606 } 3607 } 3608 3609 3610 /* 3611 * dtInsertEntry() 3612 * 3613 * function: allocate free slot(s) and 3614 * write a leaf/internal entry 3615 * 3616 * return: entry slot index 3617 */ 3618 static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key, 3619 ddata_t * data, struct dt_lock ** dtlock) 3620 { 3621 struct dtslot *h, *t; 3622 struct ldtentry *lh = NULL; 3623 struct idtentry *ih = NULL; 3624 int hsi, fsi, klen, len, nextindex; 3625 wchar_t *kname; 3626 __le16 *name; 3627 s8 *stbl; 3628 pxd_t *xd; 3629 struct dt_lock *dtlck = *dtlock; 3630 struct lv *lv; 3631 int xsi, n; 3632 s64 bn = 0; 3633 struct metapage *mp = NULL; 3634 3635 klen = key->namlen; 3636 kname = key->name; 3637 3638 /* allocate a free slot */ 3639 hsi = fsi = p->header.freelist; 3640 h = &p->slot[fsi]; 3641 p->header.freelist = h->next; 3642 --p->header.freecnt; 3643 3644 /* open new linelock */ 3645 if (dtlck->index >= dtlck->maxcnt) 3646 dtlck = (struct dt_lock *) txLinelock(dtlck); 3647 3648 lv = & dtlck->lv[dtlck->index]; 3649 lv->offset = hsi; 3650 3651 /* write head/only segment */ 3652 if (p->header.flag & BT_LEAF) { 3653 lh = (struct ldtentry *) h; 3654 lh->next = h->next; 3655 lh->inumber = cpu_to_le32(data->leaf.ino); 3656 lh->namlen = klen; 3657 name = lh->name; 3658 if (data->leaf.ip) { 3659 len = min(klen, DTLHDRDATALEN); 3660 if (!(p->header.flag & BT_ROOT)) 3661 bn = addressPXD(&p->header.self); 3662 lh->index = cpu_to_le32(add_index(data->leaf.tid, 3663 data->leaf.ip, 3664 bn, index)); 3665 } else 3666 len = min(klen, DTLHDRDATALEN_LEGACY); 3667 } else { 3668 ih = (struct idtentry *) h; 3669 ih->next = h->next; 3670 xd = (pxd_t *) ih; 3671 *xd = data->xd; 3672 ih->namlen = klen; 3673 name = ih->name; 3674 len = min(klen, DTIHDRDATALEN); 3675 } 3676 3677 UniStrncpy_to_le(name, kname, len); 3678 3679 n = 1; 3680 xsi = hsi; 3681 3682 /* write additional segment(s) */ 3683 t = h; 3684 klen -= len; 3685 while (klen) { 3686 /* get free slot */ 3687 fsi = p->header.freelist; 3688 t = &p->slot[fsi]; 3689 p->header.freelist = t->next; 3690 --p->header.freecnt; 3691 3692 /* is next slot contiguous ? 
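 * (contiguous slots extend the current linelock vector entry; a
 * gap closes it and opens a new lv, keeping each logged
 * (offset, length) range as small as possible)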
*/ 3693 if (fsi != xsi + 1) { 3694 /* close current linelock */ 3695 lv->length = n; 3696 dtlck->index++; 3697 3698 /* open new linelock */ 3699 if (dtlck->index < dtlck->maxcnt) 3700 lv++; 3701 else { 3702 dtlck = (struct dt_lock *) txLinelock(dtlck); 3703 lv = & dtlck->lv[0]; 3704 } 3705 3706 lv->offset = fsi; 3707 n = 0; 3708 } 3709 3710 kname += len; 3711 len = min(klen, DTSLOTDATALEN); 3712 UniStrncpy_to_le(t->name, kname, len); 3713 3714 n++; 3715 xsi = fsi; 3716 klen -= len; 3717 } 3718 3719 /* close current linelock */ 3720 lv->length = n; 3721 dtlck->index++; 3722 3723 *dtlock = dtlck; 3724 3725 /* terminate last/only segment */ 3726 if (h == t) { 3727 /* single segment entry */ 3728 if (p->header.flag & BT_LEAF) 3729 lh->next = -1; 3730 else 3731 ih->next = -1; 3732 } else 3733 /* multi-segment entry */ 3734 t->next = -1; 3735 3736 /* if insert into middle, shift right succeeding entries in stbl */ 3737 stbl = DT_GETSTBL(p); 3738 nextindex = p->header.nextindex; 3739 if (index < nextindex) { 3740 memmove(stbl + index + 1, stbl + index, nextindex - index); 3741 3742 if ((p->header.flag & BT_LEAF) && data->leaf.ip) { 3743 s64 lblock; 3744 3745 /* 3746 * Need to update slot number for entries that moved 3747 * in the stbl 3748 */ 3749 mp = NULL; 3750 for (n = index + 1; n <= nextindex; n++) { 3751 lh = (struct ldtentry *) & (p->slot[stbl[n]]); 3752 modify_index(data->leaf.tid, data->leaf.ip, 3753 le32_to_cpu(lh->index), bn, n, 3754 &mp, &lblock); 3755 } 3756 if (mp) 3757 release_metapage(mp); 3758 } 3759 } 3760 3761 stbl[index] = hsi; 3762 3763 /* advance next available entry index of stbl */ 3764 ++p->header.nextindex; 3765 } 3766 3767 3768 /* 3769 * dtMoveEntry() 3770 * 3771 * function: move entries from split/left page to new/right page 3772 * 3773 * nextindex of dst page and freelist/freecnt of both pages 3774 * are updated. 3775 */ 3776 static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp, 3777 struct dt_lock ** sdtlock, struct dt_lock ** ddtlock, 3778 int do_index) 3779 { 3780 int ssi, next; /* src slot index */ 3781 int di; /* dst entry index */ 3782 int dsi; /* dst slot index */ 3783 s8 *sstbl, *dstbl; /* sorted entry table */ 3784 int snamlen, len; 3785 struct ldtentry *slh, *dlh = NULL; 3786 struct idtentry *sih, *dih = NULL; 3787 struct dtslot *h, *s, *d; 3788 struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock; 3789 struct lv *slv, *dlv; 3790 int xssi, ns, nd; 3791 int sfsi; 3792 3793 sstbl = (s8 *) & sp->slot[sp->header.stblindex]; 3794 dstbl = (s8 *) & dp->slot[dp->header.stblindex]; 3795 3796 dsi = dp->header.freelist; /* first (whole page) free slot */ 3797 sfsi = sp->header.freelist; 3798 3799 /* linelock destination entry slot */ 3800 dlv = & ddtlck->lv[ddtlck->index]; 3801 dlv->offset = dsi; 3802 3803 /* linelock source entry slot */ 3804 slv = & sdtlck->lv[sdtlck->index]; 3805 slv->offset = sstbl[si]; 3806 xssi = slv->offset - 1; 3807 3808 /* 3809 * move entries 3810 */ 3811 ns = nd = 0; 3812 for (di = 0; si < sp->header.nextindex; si++, di++) { 3813 ssi = sstbl[si]; 3814 dstbl[di] = dsi; 3815 3816 /* is next slot contiguous ? 
*/ 3817 if (ssi != xssi + 1) { 3818 /* close current linelock */ 3819 slv->length = ns; 3820 sdtlck->index++; 3821 3822 /* open new linelock */ 3823 if (sdtlck->index < sdtlck->maxcnt) 3824 slv++; 3825 else { 3826 sdtlck = (struct dt_lock *) txLinelock(sdtlck); 3827 slv = & sdtlck->lv[0]; 3828 } 3829 3830 slv->offset = ssi; 3831 ns = 0; 3832 } 3833 3834 /* 3835 * move head/only segment of an entry 3836 */ 3837 /* get dst slot */ 3838 h = d = &dp->slot[dsi]; 3839 3840 /* get src slot and move */ 3841 s = &sp->slot[ssi]; 3842 if (sp->header.flag & BT_LEAF) { 3843 /* get source entry */ 3844 slh = (struct ldtentry *) s; 3845 dlh = (struct ldtentry *) h; 3846 snamlen = slh->namlen; 3847 3848 if (do_index) { 3849 len = min(snamlen, DTLHDRDATALEN); 3850 dlh->index = slh->index; /* little-endian */ 3851 } else 3852 len = min(snamlen, DTLHDRDATALEN_LEGACY); 3853 3854 memcpy(dlh, slh, 6 + len * 2); 3855 3856 next = slh->next; 3857 3858 /* update dst head/only segment next field */ 3859 dsi++; 3860 dlh->next = dsi; 3861 } else { 3862 sih = (struct idtentry *) s; 3863 snamlen = sih->namlen; 3864 3865 len = min(snamlen, DTIHDRDATALEN); 3866 dih = (struct idtentry *) h; 3867 memcpy(dih, sih, 10 + len * 2); 3868 next = sih->next; 3869 3870 dsi++; 3871 dih->next = dsi; 3872 } 3873 3874 /* free src head/only segment */ 3875 s->next = sfsi; 3876 s->cnt = 1; 3877 sfsi = ssi; 3878 3879 ns++; 3880 nd++; 3881 xssi = ssi; 3882 3883 /* 3884 * move additional segment(s) of the entry 3885 */ 3886 snamlen -= len; 3887 while ((ssi = next) >= 0) { 3888 /* is next slot contiguous ? */ 3889 if (ssi != xssi + 1) { 3890 /* close current linelock */ 3891 slv->length = ns; 3892 sdtlck->index++; 3893 3894 /* open new linelock */ 3895 if (sdtlck->index < sdtlck->maxcnt) 3896 slv++; 3897 else { 3898 sdtlck = 3899 (struct dt_lock *) 3900 txLinelock(sdtlck); 3901 slv = & sdtlck->lv[0]; 3902 } 3903 3904 slv->offset = ssi; 3905 ns = 0; 3906 } 3907 3908 /* get next source segment */ 3909 s = &sp->slot[ssi]; 3910 3911 /* get next destination free slot */ 3912 d++; 3913 3914 len = min(snamlen, DTSLOTDATALEN); 3915 UniStrncpy_le(d->name, s->name, len); 3916 3917 ns++; 3918 nd++; 3919 xssi = ssi; 3920 3921 dsi++; 3922 d->next = dsi; 3923 3924 /* free source segment */ 3925 next = s->next; 3926 s->next = sfsi; 3927 s->cnt = 1; 3928 sfsi = ssi; 3929 3930 snamlen -= len; 3931 } /* end while */ 3932 3933 /* terminate dst last/only segment */ 3934 if (h == d) { 3935 /* single segment entry */ 3936 if (dp->header.flag & BT_LEAF) 3937 dlh->next = -1; 3938 else 3939 dih->next = -1; 3940 } else 3941 /* multi-segment entry */ 3942 d->next = -1; 3943 } /* end for */ 3944 3945 /* close current linelock */ 3946 slv->length = ns; 3947 sdtlck->index++; 3948 *sdtlock = sdtlck; 3949 3950 dlv->length = nd; 3951 ddtlck->index++; 3952 *ddtlock = ddtlck; 3953 3954 /* update source header */ 3955 sp->header.freelist = sfsi; 3956 sp->header.freecnt += nd; 3957 3958 /* update destination header */ 3959 dp->header.nextindex = di; 3960 3961 dp->header.freelist = dsi; 3962 dp->header.freecnt -= nd; 3963 } 3964 3965 3966 /* 3967 * dtDeleteEntry() 3968 * 3969 * function: free a (leaf/internal) entry 3970 * 3971 * log freelist header, stbl, and each segment slot of entry 3972 * (even though last/only segment next field is modified, 3973 * physical image logging requires all segment slots of 3974 * the entry logged to avoid applying previous updates 3975 * to the same slots) 3976 */ 3977 static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock) 3978 
{ 3979 int fsi; /* free entry slot index */ 3980 s8 *stbl; 3981 struct dtslot *t; 3982 int si, freecnt; 3983 struct dt_lock *dtlck = *dtlock; 3984 struct lv *lv; 3985 int xsi, n; 3986 3987 /* get free entry slot index */ 3988 stbl = DT_GETSTBL(p); 3989 fsi = stbl[fi]; 3990 3991 /* open new linelock */ 3992 if (dtlck->index >= dtlck->maxcnt) 3993 dtlck = (struct dt_lock *) txLinelock(dtlck); 3994 lv = & dtlck->lv[dtlck->index]; 3995 3996 lv->offset = fsi; 3997 3998 /* get the head/only segment */ 3999 t = &p->slot[fsi]; 4000 if (p->header.flag & BT_LEAF) 4001 si = ((struct ldtentry *) t)->next; 4002 else 4003 si = ((struct idtentry *) t)->next; 4004 t->next = si; 4005 t->cnt = 1; 4006 4007 n = freecnt = 1; 4008 xsi = fsi; 4009 4010 /* find the last/only segment */ 4011 while (si >= 0) { 4012 /* is next slot contiguous ? */ 4013 if (si != xsi + 1) { 4014 /* close current linelock */ 4015 lv->length = n; 4016 dtlck->index++; 4017 4018 /* open new linelock */ 4019 if (dtlck->index < dtlck->maxcnt) 4020 lv++; 4021 else { 4022 dtlck = (struct dt_lock *) txLinelock(dtlck); 4023 lv = & dtlck->lv[0]; 4024 } 4025 4026 lv->offset = si; 4027 n = 0; 4028 } 4029 4030 n++; 4031 xsi = si; 4032 freecnt++; 4033 4034 t = &p->slot[si]; 4035 t->cnt = 1; 4036 si = t->next; 4037 } 4038 4039 /* close current linelock */ 4040 lv->length = n; 4041 dtlck->index++; 4042 4043 *dtlock = dtlck; 4044 4045 /* update freelist */ 4046 t->next = p->header.freelist; 4047 p->header.freelist = fsi; 4048 p->header.freecnt += freecnt; 4049 4050 /* if delete from middle, 4051 * shift left the succedding entries in the stbl 4052 */ 4053 si = p->header.nextindex; 4054 if (fi < si - 1) 4055 memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1); 4056 4057 p->header.nextindex--; 4058 } 4059 4060 4061 /* 4062 * dtTruncateEntry() 4063 * 4064 * function: truncate a (leaf/internal) entry 4065 * 4066 * log freelist header, stbl, and each segment slot of entry 4067 * (even though last/only segment next field is modified, 4068 * physical image logging requires all segment slots of 4069 * the entry logged to avoid applying previous updates 4070 * to the same slots) 4071 */ 4072 static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock) 4073 { 4074 int tsi; /* truncate entry slot index */ 4075 s8 *stbl; 4076 struct dtslot *t; 4077 int si, freecnt; 4078 struct dt_lock *dtlck = *dtlock; 4079 struct lv *lv; 4080 int fsi, xsi, n; 4081 4082 /* get free entry slot index */ 4083 stbl = DT_GETSTBL(p); 4084 tsi = stbl[ti]; 4085 4086 /* open new linelock */ 4087 if (dtlck->index >= dtlck->maxcnt) 4088 dtlck = (struct dt_lock *) txLinelock(dtlck); 4089 lv = & dtlck->lv[dtlck->index]; 4090 4091 lv->offset = tsi; 4092 4093 /* get the head/only segment */ 4094 t = &p->slot[tsi]; 4095 ASSERT(p->header.flag & BT_INTERNAL); 4096 ((struct idtentry *) t)->namlen = 0; 4097 si = ((struct idtentry *) t)->next; 4098 ((struct idtentry *) t)->next = -1; 4099 4100 n = 1; 4101 freecnt = 0; 4102 fsi = si; 4103 xsi = tsi; 4104 4105 /* find the last/only segment */ 4106 while (si >= 0) { 4107 /* is next slot contiguous ? 
*/ 4108 if (si != xsi + 1) { 4109 /* close current linelock */ 4110 lv->length = n; 4111 dtlck->index++; 4112 4113 /* open new linelock */ 4114 if (dtlck->index < dtlck->maxcnt) 4115 lv++; 4116 else { 4117 dtlck = (struct dt_lock *) txLinelock(dtlck); 4118 lv = & dtlck->lv[0]; 4119 } 4120 4121 lv->offset = si; 4122 n = 0; 4123 } 4124 4125 n++; 4126 xsi = si; 4127 freecnt++; 4128 4129 t = &p->slot[si]; 4130 t->cnt = 1; 4131 si = t->next; 4132 } 4133 4134 /* close current linelock */ 4135 lv->length = n; 4136 dtlck->index++; 4137 4138 *dtlock = dtlck; 4139 4140 /* update freelist */ 4141 if (freecnt == 0) 4142 return; 4143 t->next = p->header.freelist; 4144 p->header.freelist = fsi; 4145 p->header.freecnt += freecnt; 4146 } 4147 4148 4149 /* 4150 * dtLinelockFreelist() 4151 */ 4152 static void dtLinelockFreelist(dtpage_t * p, /* directory page */ 4153 int m, /* max slot index */ 4154 struct dt_lock ** dtlock) 4155 { 4156 int fsi; /* free entry slot index */ 4157 struct dtslot *t; 4158 int si; 4159 struct dt_lock *dtlck = *dtlock; 4160 struct lv *lv; 4161 int xsi, n; 4162 4163 /* get free entry slot index */ 4164 fsi = p->header.freelist; 4165 4166 /* open new linelock */ 4167 if (dtlck->index >= dtlck->maxcnt) 4168 dtlck = (struct dt_lock *) txLinelock(dtlck); 4169 lv = & dtlck->lv[dtlck->index]; 4170 4171 lv->offset = fsi; 4172 4173 n = 1; 4174 xsi = fsi; 4175 4176 t = &p->slot[fsi]; 4177 si = t->next; 4178 4179 /* find the last/only segment */ 4180 while (si < m && si >= 0) { 4181 /* is next slot contiguous ? */ 4182 if (si != xsi + 1) { 4183 /* close current linelock */ 4184 lv->length = n; 4185 dtlck->index++; 4186 4187 /* open new linelock */ 4188 if (dtlck->index < dtlck->maxcnt) 4189 lv++; 4190 else { 4191 dtlck = (struct dt_lock *) txLinelock(dtlck); 4192 lv = & dtlck->lv[0]; 4193 } 4194 4195 lv->offset = si; 4196 n = 0; 4197 } 4198 4199 n++; 4200 xsi = si; 4201 4202 t = &p->slot[si]; 4203 si = t->next; 4204 } 4205 4206 /* close current linelock */ 4207 lv->length = n; 4208 dtlck->index++; 4209 4210 *dtlock = dtlck; 4211 } 4212 4213 4214 /* 4215 * NAME: dtModify 4216 * 4217 * FUNCTION: Modify the inode number part of a directory entry 4218 * 4219 * PARAMETERS: 4220 * tid - Transaction id 4221 * ip - Inode of parent directory 4222 * key - Name of entry to be modified 4223 * orig_ino - Original inode number expected in entry 4224 * new_ino - New inode number to put into entry 4225 * flag - JFS_RENAME 4226 * 4227 * RETURNS: 4228 * -ESTALE - If entry found does not match orig_ino passed in 4229 * -ENOENT - If no entry can be found to match key 4230 * 0 - If successfully modified entry 4231 */ 4232 int dtModify(tid_t tid, struct inode *ip, 4233 struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag) 4234 { 4235 int rc; 4236 s64 bn; 4237 struct metapage *mp; 4238 dtpage_t *p; 4239 int index; 4240 struct btstack btstack; 4241 struct tlock *tlck; 4242 struct dt_lock *dtlck; 4243 struct lv *lv; 4244 s8 *stbl; 4245 int entry_si; /* entry slot index */ 4246 struct ldtentry *entry; 4247 4248 /* 4249 * search for the entry to modify: 4250 * 4251 * dtSearch() returns (leaf page pinned, index at which to modify). 
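 *
 * (an illustrative call, roughly as the rename path issues it when
 * the target name already exists; tid, new_dir, new_dname, old_ip
 * and new_ip are assumed caller-side names, not defined here:
 *
 *	ino = new_ip->i_ino;
 *	rc = dtModify(tid, new_dir, &new_dname, &ino,
 *		      old_ip->i_ino, JFS_RENAME);
 * )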
4252 */ 4253 if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag))) 4254 return rc; 4255 4256 /* retrieve search result */ 4257 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); 4258 4259 BT_MARK_DIRTY(mp, ip); 4260 /* 4261 * acquire a transaction lock on the leaf page of named entry 4262 */ 4263 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); 4264 dtlck = (struct dt_lock *) & tlck->lock; 4265 4266 /* get slot index of the entry */ 4267 stbl = DT_GETSTBL(p); 4268 entry_si = stbl[index]; 4269 4270 /* linelock entry */ 4271 ASSERT(dtlck->index == 0); 4272 lv = & dtlck->lv[0]; 4273 lv->offset = entry_si; 4274 lv->length = 1; 4275 dtlck->index++; 4276 4277 /* get the head/only segment */ 4278 entry = (struct ldtentry *) & p->slot[entry_si]; 4279 4280 /* substitute the inode number of the entry */ 4281 entry->inumber = cpu_to_le32(new_ino); 4282 4283 /* unpin the leaf page */ 4284 DT_PUTPAGE(mp); 4285 4286 return 0; 4287 } 4288