/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Now we have all buffers that must be used in balancing of the tree
 * Further calculations can not cause schedule(), and thus the buffer
 * tree will be stable until the balancing will be finished
 * balance the tree according to the analysis made before,
 * and using buffers obtained after all above.
 */

#include <linux/uaccess.h>
#include <linux/time.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/kernel.h>

/* describe the left neighbor L[0] (and its parent/position) for leaf_* helpers */
static inline void buffer_info_init_left(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->L[0];
	bi->bi_parent = tb->FL[0];
	bi->bi_position = get_left_neighbor_position(tb, 0);
}

/* describe the right neighbor R[0] for leaf_* helpers */
static inline void buffer_info_init_right(struct tree_balance *tb,
					  struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->R[0];
	bi->bi_parent = tb->FR[0];
	bi->bi_position = get_right_neighbor_position(tb, 0);
}

/* describe the node S[0] being balanced (last buffer on the search path) */
static inline void buffer_info_init_tbS0(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
	bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
	bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
}

/* describe a freshly allocated node (e.g. S_new[i]) which has no parent yet */
static inline void buffer_info_init_bh(struct tree_balance *tb,
				       struct buffer_info *bi,
				       struct buffer_head *bh)
{
	bi->tb = tb;
	bi->bi_bh = bh;
	bi->bi_parent = NULL;
	bi->bi_position = 0;
}

/* mark a modified buffer dirty in the current transaction; 'flag' is unused */
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle, bh);
}

#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/*
 * summary:
 *  if deleting something ( tb->insert_size[0] < 0 )
 *    return(balance_leaf_when_delete()); (flag d handled here)
 *  else
 *    if lnum is larger than 0 we put items into the left node
 *    if rnum is larger than 0 we put items into the right node
 *    if snum1 is larger than 0 we put items into the new node s1
 *    if snum2 is larger than 0 we put items into the new node s2
 * Note that all *num* count new items being created.
 */

/* M_DELETE: remove the item at the path position from S[0] entirely */
static void balance_leaf_when_delete_del(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct buffer_info bi;
#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih = item_head(tbS0, item_pos);
#endif

	RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
	       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
	       -tb->insert_size[0], ih);

	buffer_info_init_tbS0(tb, &bi);
	leaf_delete_items(&bi, 0, item_pos, 1, -1);

	/* deleting item 0 invalidates the left delimiting key; fix it up */
	if (!item_pos && tb->CFL[0]) {
		if (B_NR_ITEMS(tbS0)) {
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
		} else {
			if (!PATH_H_POSITION(tb->tb_path, 1))
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    PATH_H_PPARENT(tb->tb_path, 0), 0);
		}
	}

	RFALSE(!item_pos && !tb->CFL[0],
	       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
	       tb->L[0]);
}

/* cut item in S[0] */
static void balance_leaf_when_delete_cut(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct item_head *ih = item_head(tbS0, item_pos);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	buffer_info_init_tbS0(tb, &bi);

	if (is_direntry_le_ih(ih)) {
		/*
		 * UFS unlink semantics are such that you can only
		 * delete one directory entry at a time.
		 *
		 * when we cut a directory tb->insert_size[0] means
		 * number of entries to be cut (always 1)
		 */
		tb->insert_size[0] = -1;
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
		       "PAP-12030: can not change delimiting key. CFL[0]=%p",
		       tb->CFL[0]);

		/* cutting entry 0 of item 0 changes the left delimiting key */
		if (!item_pos && !pos_in_item && tb->CFL[0])
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
	} else {
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!ih_item_len(ih),
		       "PAP-12035: cut must leave non-zero dynamic "
		       "length of item");
	}
}

/*
 * L[0] takes part in balancing after a delete/cut: join S[0] (and
 * possibly R[0]) into L[0], or split S[0]'s remains between L[0] and R[0].
 * Returns 0, or -1 when the surviving contents ended up in R[0].
 */
static int balance_leaf_when_delete_left(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* L[0] must be joined with S[0] */
	if (tb->lnum[0] == -1) {
		/* R[0] must be also joined with S[0] */
		if (tb->rnum[0] == -1) {
			if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
				/*
				 * all contents of all the
				 * 3 buffers will be in L[0]
				 */
				if (PATH_H_POSITION(tb->tb_path, 1) == 0 &&
				    1 < B_NR_ITEMS(tb->FR[0]))
					replace_key(tb, tb->CFL[0],
						    tb->lkey[0], tb->FR[0], 1);

				leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_R_TO_L, tb,
						B_NR_ITEMS(tb->R[0]), -1,
						NULL);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->R[0]);

				return 0;
			}

			/* all contents of all the 3 buffers will be in R[0] */
			leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL);
			leaf_move_items(LEAF_FROM_L_TO_R, tb,
					B_NR_ITEMS(tb->L[0]), -1, NULL);

			/* right_delimiting_key is correct in R[0] */
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

			reiserfs_invalidate_buffer(tb, tbS0);
			reiserfs_invalidate_buffer(tb, tb->L[0]);

			return -1;
		}

		RFALSE(tb->rnum[0] != 0,
		       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
		/* all contents of L[0] and S[0] will be in L[0] */
		leaf_shift_left(tb, n, -1);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	/*
	 * a part of contents of S[0] will be in L[0] and
	 * the rest part of S[0] will be in R[0]
	 */

	RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
	       (tb->lnum[0] + tb->rnum[0] > n + 1),
	       "PAP-12050: rnum(%d) and lnum(%d) and item "
	       "number(%d) in S[0] are not consistent",
	       tb->rnum[0], tb->lnum[0], n);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
	       (tb->lbytes != -1 || tb->rbytes != -1),
	       "PAP-12055: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are not split",
	       tb->rbytes, tb->lbytes);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
	       (tb->lbytes < 1 || tb->rbytes != -1),
	       "PAP-12060: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are split",
	       tb->rbytes, tb->lbytes);

	leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

	reiserfs_invalidate_buffer(tb, tbS0);

	return 0;
}

/*
 * Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of items
 *         to the neighbor
 */
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct buffer_info bi;
	int n;
	struct item_head *ih;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	ih = item_head(tbS0, item_pos);
	buffer_info_init_tbS0(tb, &bi);

	/* Delete or truncate the item */

	BUG_ON(flag != M_DELETE && flag != M_CUT);
	if (flag == M_DELETE)
		balance_leaf_when_delete_del(tb);
	else /* M_CUT */
		balance_leaf_when_delete_cut(tb);


	/*
	 * the rule is that no shifting occurs unless by shifting
	 * a node can be freed
	 */
	n = B_NR_ITEMS(tbS0);


	/* L[0] takes part in balancing */
	if (tb->lnum[0])
		return balance_leaf_when_delete_left(tb);

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}

/*
 * M_INSERT with the new item (or its left part) destined for L[0].
 * Returns the number of bytes by which the caller must advance 'body'
 * when part of the item was consumed by L[0].
 */
static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
					     struct item_head *const ih,
					     const char * const body)
{
	int ret;
	struct buffer_info bi;
	int n = B_NR_ITEMS(tb->L[0]);
	unsigned body_shift_bytes = 0;

	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
		/* part of new item falls into L[0] */
		int new_item_len, shift;
		int version;

		ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);

		/* Calculate item length to insert to S[0] */
		new_item_len = ih_item_len(ih) - tb->lbytes;

		/* Calculate and check item length to insert to L[0] */
		put_ih_item_len(ih, ih_item_len(ih) - new_item_len);

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12080: there is nothing to insert into L[0]: "
		       "ih_item_len=%d", ih_item_len(ih));

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
			     min_t(int, tb->zeroes_num, ih_item_len(ih)));

		version = ih_version(ih);

		/*
		 * Calculate key component, item length and body to
		 * insert into S[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;

		add_le_ih_k_offset(ih, tb->lbytes << shift);

		put_ih_item_len(ih, new_item_len);
		if (tb->lbytes > tb->zeroes_num) {
			body_shift_bytes = tb->lbytes - tb->zeroes_num;
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= tb->lbytes;

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12085: there is nothing to insert into S[0]: "
		       "ih_item_len=%d", ih_item_len(ih));
	} else {
		/* new item in whole falls into L[0] */
		/* Shift lnum[0]-1 items to L[0] */
		ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
				     tb->zeroes_num);
		tb->insert_size[0] = 0;
		tb->zeroes_num = 0;
	}
	return body_shift_bytes;
}

/* M_PASTE into a directory item that is (partly) shifted into L[0] */
static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
						 struct item_head * const ih,
						 const char * const body)
{
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;

	RFALSE(tb->zeroes_num,
	       "PAP-12090: invalid parameter in case of a directory");

	/* directory item */
	if (tb->lbytes > tb->pos_in_item) {
		/* new directory entry falls into L[0] */
		struct item_head *pasted;
		int ret, l_pos_in_item = tb->pos_in_item;

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 entries from given directory item
		 */
		ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
		if (ret && !tb->item_pos) {
			pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
			l_pos_in_item += ih_entry_count(pasted) -
					 (tb->lbytes - 1);
		}

		/* Append given directory entry to directory item */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     l_pos_in_item, tb->insert_size[0],
				     body, tb->zeroes_num);

		/*
		 * previous string prepared space for pasting new entry,
		 * following string pastes this entry
		 */

		/*
		 * when we have merge directory item, pos_in_item
		 * has been changed too
		 */

		/* paste new directory entry. 1 is entry number */
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   l_pos_in_item, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);
		tb->insert_size[0] = 0;
	} else {
		/* new directory item doesn't fall into L[0] */
		/*
		 * Shift lnum[0]-1 items in whole. Shift lbytes
		 * directory entries from directory item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}

	/* Calculate new position to append in item body */
	tb->pos_in_item -= tb->lbytes;
}

/*
 * M_PASTE where the pasted item must be split between L[0] and S[0].
 * Returns the number of body bytes consumed by the left part.
 */
static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
						  struct item_head * const ih,
						  const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;
	int body_shift_bytes = 0;

	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
		balance_leaf_paste_left_shift_dirent(tb, ih, body);
		return 0;
	}

	RFALSE(tb->lbytes <= 0,
	       "PAP-12095: there is nothing to shift to L[0]. "
	       "lbytes=%d", tb->lbytes);
	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
	       "PAP-12100: incorrect position to paste: "
	       "item_len=%d, pos_in_item=%d",
	       ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item);

	/* appended item will be in L[0] in whole */
	if (tb->lbytes >= tb->pos_in_item) {
		struct item_head *tbS0_pos_ih, *tbL0_ih;
		struct item_head *tbS0_0_ih;
		struct reiserfs_key *left_delim_key;
		int ret, l_n, version, temp_l;

		tbS0_pos_ih = item_head(tbS0, tb->item_pos);
		tbS0_0_ih = item_head(tbS0, 0);

		/*
		 * this bytes number must be appended
		 * to the last item of L[h]
		 */
		l_n = tb->lbytes - tb->pos_in_item;

		/* Calculate new insert_size[0] */
		tb->insert_size[0] -= l_n;

		RFALSE(tb->insert_size[0] <= 0,
		       "PAP-12105: there is nothing to paste into "
		       "L[0]. insert_size=%d", tb->insert_size[0]);

		ret = leaf_shift_left(tb, tb->lnum[0],
				      ih_item_len(tbS0_pos_ih));

		tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret);

		/* Append to body of item in L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     ih_item_len(tbL0_ih), l_n, body,
				     min_t(int, l_n, tb->zeroes_num));

		/*
		 * 0-th item in S0 can be only of DIRECT type
		 * when l_n != 0
		 */
		temp_l = l_n;

		RFALSE(ih_item_len(tbS0_0_ih),
		       "PAP-12106: item length must be 0");
		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
		       leaf_key(tb->L[0], n + tb->item_pos - ret)),
		       "PAP-12107: items must be of the same file");

		if (is_indirect_le_ih(tbL0_ih)) {
			int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
			temp_l = l_n << shift;
		}
		/* update key of first item in S0 */
		version = ih_version(tbS0_0_ih);
		add_le_key_k_offset(version, &tbS0_0_ih->ih_key, temp_l);

		/* update left delimiting key */
		left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]);
		add_le_key_k_offset(version, left_delim_key, temp_l);

		/*
		 * Calculate new body, position in item and
		 * insert_size[0]
		 */
		if (l_n > tb->zeroes_num) {
			body_shift_bytes = l_n - tb->zeroes_num;
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= l_n;
		tb->pos_in_item = 0;

		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
					  leaf_key(tb->L[0],
						 B_NR_ITEMS(tb->L[0]) - 1)) ||
		       !op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size) ||
		       !op_is_left_mergeable(left_delim_key, tbS0->b_size),
		       "PAP-12120: item must be merge-able with left "
		       "neighboring item");
	} else {
		/* only part of the appended item will be in L[0] */

		/* Calculate position in item for append in S[0] */
		tb->pos_in_item -= tb->lbytes;

		RFALSE(tb->pos_in_item <= 0,
		       "PAP-12125: no place for paste. pos_in_item=%d",
		       tb->pos_in_item);

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 byte from item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}
	return body_shift_bytes;
}


/* appended item will be in L[0] in whole */
static void balance_leaf_paste_left_whole(struct tree_balance *tb,
					  struct item_head * const ih,
					  const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;
	struct item_head *pasted;
	int ret;

	/* if we paste into first item of S[0] and it is left mergable */
	if (!tb->item_pos &&
	    op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size)) {
		/*
		 * then increment pos_in_item by the size of the
		 * last item in L[0]
		 */
		pasted = item_head(tb->L[0], n - 1);
		if (is_direntry_le_ih(pasted))
			tb->pos_in_item += ih_entry_count(pasted);
		else
			tb->pos_in_item += ih_item_len(pasted);
	}

	/*
	 * Shift lnum[0] - 1 items in whole.
	 * Shift lbytes - 1 byte from item number lnum[0]
	 */
	ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);

	/* Append to body of item in L[0] */
	buffer_info_init_left(tb, &bi);
	leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item,
			     tb->insert_size[0], body, tb->zeroes_num);

	/* if appended item is directory, paste entry */
	pasted = item_head(tb->L[0], n + tb->item_pos - ret);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/*
	 * if appended item is indirect item, put unformatted node
	 * into un list
	 */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->insert_size[0] = 0;
	tb->zeroes_num = 0;
}

/* M_PASTE dispatch for the left neighbor: split paste vs. whole paste */
static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
					    struct item_head * const ih,
					    const char * const body)
{
	/* we must shift the part of the appended item */
	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
		return balance_leaf_paste_left_shift(tb, ih, body);
	else
		balance_leaf_paste_left_whole(tb, ih, body);
	return 0;
}

/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
static unsigned int balance_leaf_left(struct tree_balance *tb,
				      struct item_head * const ih,
				      const char * const body, int flag)
{
	if (tb->lnum[0] <= 0)
		return 0;

	/* new item or it part falls to L[0], shift it too */
	if (tb->item_pos < tb->lnum[0]) {
		BUG_ON(flag != M_INSERT && flag != M_PASTE);

		if (flag == M_INSERT)
			return balance_leaf_insert_left(tb, ih, body);
		else /* M_PASTE */
			return balance_leaf_paste_left(tb, ih, body);
	} else
		/* new item doesn't fall into L[0] */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	return 0;
}


/* M_INSERT with the new item (or its right part) destined for R[0] */
static void balance_leaf_insert_right(struct tree_balance *tb,
				      struct item_head * const ih,
				      const char * const body)
{

	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;
	int ret;

	/* new item or part of it doesn't fall into R[0] */
	if (n - tb->rnum[0] >= tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* new item or its part falls to R[0] */

	/* part of new item falls into R[0] */
	if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
		loff_t old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int version, shift;
		loff_t offset;

		leaf_shift_right(tb, tb->rnum[0] - 1, -1);

		version = ih_version(ih);

		/* Remember key component and item length */
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into R[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift);
		set_le_ih_k_offset(ih, offset);
		put_ih_item_len(ih, tb->rbytes);

		/* Insert part of the item into R[0] */
		buffer_info_init_right(tb, &bi);
		if ((old_len - tb->rbytes) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->rbytes) - tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num -
					  (old_len - tb->rbytes);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/* Replace right delimiting key by first key in R[0] */
		replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		/*
		 * Calculate key component and item length to
		 * insert into S[0]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->rbytes);

		tb->insert_size[0] -= tb->rbytes;

	} else {
		/* whole new item falls into R[0] */

		/* Shift rnum[0]-1 items to R[0] */
		ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);

		/* Insert new item into R[0] */
		buffer_info_init_right(tb, &bi);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1,
				     ih, body, tb->zeroes_num);

		if (tb->item_pos - n + tb->rnum[0] - 1 == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}


/* M_PASTE into a directory item that is (partly) shifted into R[0] */
static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
				     struct item_head * const ih,
				     const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	int entry_count;

	RFALSE(tb->zeroes_num,
	       "PAP-12145: invalid parameter in case of a directory");
	entry_count = ih_entry_count(item_head(tbS0, tb->item_pos));

	/* new directory entry falls into R[0] */
	if (entry_count - tb->rbytes < tb->pos_in_item) {
		int paste_entry_position;

		RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0],
		       "PAP-12150: no enough of entries to shift to R[0]: "
		       "rbytes=%d, entry_count=%d", tb->rbytes, entry_count);

		/*
		 * Shift rnum[0]-1 items in whole.
		 * Shift rbytes-1 directory entries from directory
		 * item number rnum[0]
		 */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);

		/* Paste given directory entry to directory item */
		paste_entry_position = tb->pos_in_item - entry_count +
				       tb->rbytes - 1;
		buffer_info_init_right(tb, &bi);
		leaf_paste_in_buffer(&bi, 0, paste_entry_position,
				     tb->insert_size[0], body, tb->zeroes_num);

		/* paste entry */
		leaf_paste_entries(&bi, 0, paste_entry_position, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		/* change delimiting keys */
		if (paste_entry_position == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into R[0] */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
	}
}

/* M_PASTE where the pasted bytes must be split between S[0] and R[0] */
static void balance_leaf_paste_right_shift(struct tree_balance *tb,
				     struct item_head * const ih,
				     const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n_shift, n_rem, r_zeroes_number, version;
	unsigned long temp_rem;
	const char *r_body;
	struct buffer_info bi;

	/* we append to directory item */
	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
		balance_leaf_paste_right_shift_dirent(tb, ih, body);
		return;
	}

	/* regular object */

	/*
	 * Calculate number of bytes which must be shifted
	 * from appended item
	 */
	n_shift = tb->rbytes - tb->insert_size[0];
	if (n_shift < 0)
		n_shift = 0;

	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
	       "PAP-12155: invalid position to paste. ih_item_len=%d, "
	       "pos_in_item=%d", tb->pos_in_item,
	       ih_item_len(item_head(tbS0, tb->item_pos)));

	leaf_shift_right(tb, tb->rnum[0], n_shift);

	/*
	 * Calculate number of bytes which must remain in body
	 * after appending to R[0]
	 */
	n_rem = tb->insert_size[0] - tb->rbytes;
	if (n_rem < 0)
		n_rem = 0;

	temp_rem = n_rem;

	version = ih_version(item_head(tb->R[0], 0));

	if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) {
		int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		temp_rem = n_rem << shift;
	}

	add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem);
	add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]),
			    temp_rem);

	do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);

	/* Append part of body into R[0] */
	buffer_info_init_right(tb, &bi);
	if (n_rem > tb->zeroes_num) {
		r_zeroes_number = 0;
		r_body = body + n_rem - tb->zeroes_num;
	} else {
		r_body = body;
		r_zeroes_number = tb->zeroes_num - n_rem;
		tb->zeroes_num -= r_zeroes_number;
	}

	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
			     r_body, r_zeroes_number);

	if (is_indirect_le_ih(item_head(tb->R[0], 0)))
		set_ih_free_space(item_head(tb->R[0], 0), 0);

	tb->insert_size[0] = n_rem;
	if (!n_rem)
		tb->pos_in_item++;
}

/* M_PASTE where the pasted item falls wholly into R[0] */
static void balance_leaf_paste_right_whole(struct tree_balance *tb,
				     struct item_head * const ih,
				     const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct item_head *pasted;
	struct buffer_info bi;

	buffer_info_init_right(tb, &bi);
	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

	/* append item in R[0] */
	if (tb->pos_in_item >= 0) {
		buffer_info_init_right(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0],
				     tb->pos_in_item, tb->insert_size[0], body,
				     tb->zeroes_num);
	}

	/* paste new entry, if item is directory item */
	pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]);
	if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) {
		leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0],
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

		if (!tb->pos_in_item) {

			RFALSE(tb->item_pos - n + tb->rnum[0],
			       "PAP-12165: directory item must be first "
			       "item of node when pasting is in 0th position");

			/* update delimiting keys */
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
		}
	}

	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);
	tb->zeroes_num = tb->insert_size[0] = 0;
}

/* M_PASTE dispatch for the right neighbor: split paste vs. whole paste */
static void balance_leaf_paste_right(struct tree_balance *tb,
				     struct item_head * const ih,
				     const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* new item doesn't fall into R[0] */
	if (n - tb->rnum[0] > tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* pasted item or part of it falls to R[0] */

	if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
		/* we must shift the part of the appended item */
		balance_leaf_paste_right_shift(tb, ih, body);
	else
		/* pasted item in whole falls into R[0] */
		balance_leaf_paste_right_whole(tb, ih, body);
}

/* shift rnum[0] items from S[0] to the right neighbor R[0] */
static void balance_leaf_right(struct tree_balance *tb,
			       struct item_head * const ih,
			       const char * const body, int flag)
{
	if (tb->rnum[0] <= 0)
		return;

	BUG_ON(flag != M_INSERT && flag != M_PASTE);

	if (flag == M_INSERT)
		balance_leaf_insert_right(tb, ih, body);
	else /* M_PASTE */
		balance_leaf_paste_right(tb, ih, body);
}

/* M_INSERT with the new item (or its part) destined for new node S_new[i] */
static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
					  struct item_head * const ih,
					  const char * const body,
					  struct item_head *insert_key,
					  struct buffer_head **insert_ptr,
					  int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;
	int shift;

	/* new item or it part don't falls into S_new[i] */
	if (n - tb->snum[i] >= tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* new item or it's part falls to first new node S_new[i] */

	/* part of new item falls into S_new[i] */
	if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
		int old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int version;

		/* Move snum[i]-1 items from S[0] to S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
				tb->S_new[i]);

		/* Remember key component and item length */
		version = ih_version(ih);
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into S_new[i]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		set_le_ih_k_offset(ih,
				   le_ih_k_offset(ih) +
				   ((old_len - tb->sbytes[i]) << shift));

		put_ih_item_len(ih, tb->sbytes[i]);

		/* Insert part of the item into S_new[i] before 0-th item */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);

		if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->sbytes[i]) -
					 tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num - (old_len -
					  tb->sbytes[i]);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/*
		 * Calculate key component and item length to
		 * insert into S[i]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->sbytes[i]);
		tb->insert_size[0] -= tb->sbytes[i];
	} else {
		/* whole new item falls into S_new[i] */

		/*
		 * Shift snum[0] - 1 items to S_new[i]
		 * (sbytes[i] of split item)
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);

		/* Insert new item into S_new[i] */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
				     ih, body, tb->zeroes_num);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}

/* we append to directory item */
static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
					 struct item_head * const ih,
					 const char * const body,
					 struct item_head *insert_key,
					 struct buffer_head **insert_ptr,
					 int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int entry_count = ih_entry_count(aux_ih);
	struct buffer_info bi;

	if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
	    tb->pos_in_item <= entry_count) {
		/* new directory entry falls into S_new[i] */

		RFALSE(!tb->insert_size[0],
		       "PAP-12215: insert_size is already 0");
		RFALSE(tb->sbytes[i] - 1 >= entry_count,
		       "PAP-12220: there are no so much entries (%d), only %d",
		       tb->sbytes[i] - 1, entry_count);

		/*
		 * Shift snum[i]-1 items in whole.
		 * Shift sbytes[i] directory entries
		 * from directory item number snum[i]
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i] - 1, tb->S_new[i]);

		/*
		 * Paste given directory entry to
		 * directory item
		 */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
				     tb->sbytes[i] - 1, tb->insert_size[0],
				     body, tb->zeroes_num);

		/* paste new directory entry */
		leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
				   tb->sbytes[i] - 1, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i], tb->S_new[i]);
	}

}

/* M_PASTE where the pasted bytes must be split between S[0] and S_new[i] */
static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
					 struct item_head * const ih,
					 const char * const body,
					 struct item_head *insert_key,
					 struct buffer_head **insert_ptr,
					 int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int n_shift, n_rem, r_zeroes_number, shift;
	const char *r_body;
	struct item_head *tmp;
	struct buffer_info bi;

	RFALSE(ih, "PAP-12210: ih must be 0");

	if (is_direntry_le_ih(aux_ih)) {
		balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
						    insert_ptr, i);
		return;
	}

	/* regular object */


	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
	       tb->insert_size[0] <= 0,
	       "PAP-12225: item too short or insert_size <= 0");

	/*
	 * Calculate number of bytes which must be shifted from appended item
	 */
	n_shift = tb->sbytes[i] - tb->insert_size[0];
	if (n_shift < 0)
		n_shift = 0;
	leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
			tb->S_new[i]);

	/*
	 * Calculate number of bytes which must remain in body after
	 * append to S_new[i]
	 */
	n_rem = tb->insert_size[0] - tb->sbytes[i];
	if (n_rem < 0)
		n_rem = 0;

	/* Append part of body into S_new[0] */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	if (n_rem > tb->zeroes_num) {
		r_zeroes_number = 0;
		r_body = body + n_rem - tb->zeroes_num;
	} else {
		r_body = body;
		r_zeroes_number = tb->zeroes_num - n_rem;
		tb->zeroes_num -= r_zeroes_number;
	}

	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
			     r_body, r_zeroes_number);

	tmp = item_head(tb->S_new[i], 0);
	shift = 0;
	if (is_indirect_le_ih(tmp)) {
		set_ih_free_space(tmp, 0);
		shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
	}
	add_le_ih_k_offset(tmp, n_rem << shift);

	tb->insert_size[0] = n_rem;
	if (!n_rem)
		tb->pos_in_item++;
}

/* M_PASTE where the pasted item falls wholly into S_new[i] */
static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
					       struct item_head * const ih,
					       const char * const body,
					       struct item_head *insert_key,
					       struct buffer_head **insert_ptr,
					       int i)

{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	int leaf_mi;
	struct item_head *pasted;
	struct buffer_info bi;

#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih_check = item_head(tbS0, tb->item_pos);

	if (!is_direntry_le_ih(ih_check) &&
	    (tb->pos_in_item != ih_item_len(ih_check) ||
	    tb->insert_size[0] <= 0))
		reiserfs_panic(tb->tb_sb,
			     "PAP-12235",
			     "pos_in_item must be equal to ih_item_len");
#endif

	leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				  tb->sbytes[i], tb->S_new[i]);

	RFALSE(leaf_mi,
	       "PAP-12240: unexpected value returned by leaf_move_items (%d)",
	       leaf_mi);

	/* paste into item */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
			     tb->pos_in_item, tb->insert_size[0],
			     body, tb->zeroes_num);

	pasted = item_head(tb->S_new[i], tb->item_pos - n +
			   tb->snum[i]);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/* if we paste to indirect item update ih_free_space */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->zeroes_num = tb->insert_size[0] = 0;

}
/* M_PASTE dispatch for new node S_new[i]: split paste vs. whole paste */
static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
					 struct item_head * const ih,
					 const char * const body,
					 struct item_head *insert_key,
					 struct buffer_head **insert_ptr,
					 int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* pasted item doesn't fall into S_new[i] */
	if (n - tb->snum[i] > tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* pasted item or part if it falls to S_new[i] */

	if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
		/* we must shift part of the appended item */
		balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
						   insert_ptr, i);
	else
		/* item falls wholly into S_new[i] */
		balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
						   insert_ptr, i);
}

/* Fill new nodes that appear in place of S[0] */
static void balance_leaf_new_nodes(struct tree_balance *tb,
				   struct item_head * const ih,
				   const char * const body,
				   struct item_head *insert_key,
				   struct buffer_head **insert_ptr,
				   int flag)
{
	int i;
	for (i = 
tb->blknum[0] - 2; i >= 0; i--) { 1240 BUG_ON(flag != M_INSERT && flag != M_PASTE); 1241 1242 RFALSE(!tb->snum[i], 1243 "PAP-12200: snum[%d] == %d. Must be > 0", i, 1244 tb->snum[i]); 1245 1246 /* here we shift from S to S_new nodes */ 1247 1248 tb->S_new[i] = get_FEB(tb); 1249 1250 /* initialized block type and tree level */ 1251 set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL); 1252 1253 if (flag == M_INSERT) 1254 balance_leaf_new_nodes_insert(tb, ih, body, insert_key, 1255 insert_ptr, i); 1256 else /* M_PASTE */ 1257 balance_leaf_new_nodes_paste(tb, ih, body, insert_key, 1258 insert_ptr, i); 1259 1260 memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE); 1261 insert_ptr[i] = tb->S_new[i]; 1262 1263 RFALSE(!buffer_journaled(tb->S_new[i]) 1264 || buffer_journal_dirty(tb->S_new[i]) 1265 || buffer_dirty(tb->S_new[i]), 1266 "PAP-12247: S_new[%d] : (%b)", 1267 i, tb->S_new[i]); 1268 } 1269 } 1270 1271 static void balance_leaf_finish_node_insert(struct tree_balance *tb, 1272 struct item_head * const ih, 1273 const char * const body) 1274 { 1275 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1276 struct buffer_info bi; 1277 buffer_info_init_tbS0(tb, &bi); 1278 leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num); 1279 1280 /* If we insert the first key change the delimiting key */ 1281 if (tb->item_pos == 0) { 1282 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1283 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 1284 1285 } 1286 } 1287 1288 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, 1289 struct item_head * const ih, 1290 const char * const body) 1291 { 1292 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1293 struct item_head *pasted = item_head(tbS0, tb->item_pos); 1294 struct buffer_info bi; 1295 1296 if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) { 1297 RFALSE(!tb->insert_size[0], 1298 "PAP-12260: insert_size is 0 already"); 1299 1300 /* prepare space */ 
1301 buffer_info_init_tbS0(tb, &bi); 1302 leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item, 1303 tb->insert_size[0], body, tb->zeroes_num); 1304 1305 /* paste entry */ 1306 leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1, 1307 (struct reiserfs_de_head *)body, 1308 body + DEH_SIZE, tb->insert_size[0]); 1309 1310 if (!tb->item_pos && !tb->pos_in_item) { 1311 RFALSE(!tb->CFL[0] || !tb->L[0], 1312 "PAP-12270: CFL[0]/L[0] must be specified"); 1313 if (tb->CFL[0]) 1314 replace_key(tb, tb->CFL[0], tb->lkey[0], 1315 tbS0, 0); 1316 } 1317 1318 tb->insert_size[0] = 0; 1319 } 1320 } 1321 1322 static void balance_leaf_finish_node_paste(struct tree_balance *tb, 1323 struct item_head * const ih, 1324 const char * const body) 1325 { 1326 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1327 struct buffer_info bi; 1328 struct item_head *pasted = item_head(tbS0, tb->item_pos); 1329 1330 /* when directory, may be new entry already pasted */ 1331 if (is_direntry_le_ih(pasted)) { 1332 balance_leaf_finish_node_paste_dirent(tb, ih, body); 1333 return; 1334 } 1335 1336 /* regular object */ 1337 1338 if (tb->pos_in_item == ih_item_len(pasted)) { 1339 RFALSE(tb->insert_size[0] <= 0, 1340 "PAP-12275: insert size must not be %d", 1341 tb->insert_size[0]); 1342 buffer_info_init_tbS0(tb, &bi); 1343 leaf_paste_in_buffer(&bi, tb->item_pos, 1344 tb->pos_in_item, tb->insert_size[0], body, 1345 tb->zeroes_num); 1346 1347 if (is_indirect_le_ih(pasted)) 1348 set_ih_free_space(pasted, 0); 1349 1350 tb->insert_size[0] = 0; 1351 } 1352 #ifdef CONFIG_REISERFS_CHECK 1353 else if (tb->insert_size[0]) { 1354 print_cur_tb("12285"); 1355 reiserfs_panic(tb->tb_sb, "PAP-12285", 1356 "insert_size must be 0 (%d)", tb->insert_size[0]); 1357 } 1358 #endif 1359 } 1360 1361 /* 1362 * if the affected item was not wholly shifted then we 1363 * perform all necessary operations on that part or whole 1364 * of the affected item which remains in S 1365 */ 1366 static void 
balance_leaf_finish_node(struct tree_balance *tb, 1367 struct item_head * const ih, 1368 const char * const body, int flag) 1369 { 1370 /* if we must insert or append into buffer S[0] */ 1371 if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { 1372 if (flag == M_INSERT) 1373 balance_leaf_finish_node_insert(tb, ih, body); 1374 else /* M_PASTE */ 1375 balance_leaf_finish_node_paste(tb, ih, body); 1376 } 1377 } 1378 1379 /** 1380 * balance_leaf - reiserfs tree balancing algorithm 1381 * @tb: tree balance state 1382 * @ih: item header of inserted item (little endian) 1383 * @body: body of inserted item or bytes to paste 1384 * @flag: i - insert, d - delete, c - cut, p - paste (see do_balance) 1385 * passed back: 1386 * @insert_key: key to insert new nodes 1387 * @insert_ptr: array of nodes to insert at the next level 1388 * 1389 * In our processing of one level we sometimes determine what must be 1390 * inserted into the next higher level. This insertion consists of a 1391 * key or two keys and their corresponding pointers. 1392 */ 1393 static int balance_leaf(struct tree_balance *tb, struct item_head *ih, 1394 const char *body, int flag, 1395 struct item_head *insert_key, 1396 struct buffer_head **insert_ptr) 1397 { 1398 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1399 1400 PROC_INFO_INC(tb->tb_sb, balance_at[0]); 1401 1402 /* Make balance in case insert_size[0] < 0 */ 1403 if (tb->insert_size[0] < 0) 1404 return balance_leaf_when_delete(tb, flag); 1405 1406 tb->item_pos = PATH_LAST_POSITION(tb->tb_path), 1407 tb->pos_in_item = tb->tb_path->pos_in_item, 1408 tb->zeroes_num = 0; 1409 if (flag == M_INSERT && !body) 1410 tb->zeroes_num = ih_item_len(ih); 1411 1412 /* 1413 * for indirect item pos_in_item is measured in unformatted node 1414 * pointers. 
Recalculate to bytes 1415 */ 1416 if (flag != M_INSERT 1417 && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) 1418 tb->pos_in_item *= UNFM_P_SIZE; 1419 1420 body += balance_leaf_left(tb, ih, body, flag); 1421 1422 /* tb->lnum[0] > 0 */ 1423 /* Calculate new item position */ 1424 tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0)); 1425 1426 balance_leaf_right(tb, ih, body, flag); 1427 1428 /* tb->rnum[0] > 0 */ 1429 RFALSE(tb->blknum[0] > 3, 1430 "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); 1431 RFALSE(tb->blknum[0] < 0, 1432 "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); 1433 1434 /* 1435 * if while adding to a node we discover that it is possible to split 1436 * it in two, and merge the left part into the left neighbor and the 1437 * right part into the right neighbor, eliminating the node 1438 */ 1439 if (tb->blknum[0] == 0) { /* node S[0] is empty now */ 1440 1441 RFALSE(!tb->lnum[0] || !tb->rnum[0], 1442 "PAP-12190: lnum and rnum must not be zero"); 1443 /* 1444 * if insertion was done before 0-th position in R[0], right 1445 * delimiting key of the tb->L[0]'s and left delimiting key are 1446 * not set correctly 1447 */ 1448 if (tb->CFL[0]) { 1449 if (!tb->CFR[0]) 1450 reiserfs_panic(tb->tb_sb, "vs-12195", 1451 "CFR not initialized"); 1452 copy_key(internal_key(tb->CFL[0], tb->lkey[0]), 1453 internal_key(tb->CFR[0], tb->rkey[0])); 1454 do_balance_mark_internal_dirty(tb, tb->CFL[0], 0); 1455 } 1456 1457 reiserfs_invalidate_buffer(tb, tbS0); 1458 return 0; 1459 } 1460 1461 balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag); 1462 1463 balance_leaf_finish_node(tb, ih, body, flag); 1464 1465 #ifdef CONFIG_REISERFS_CHECK 1466 if (flag == M_PASTE && tb->insert_size[0]) { 1467 print_cur_tb("12290"); 1468 reiserfs_panic(tb->tb_sb, 1469 "PAP-12290", "insert_size is still not 0 (%d)", 1470 tb->insert_size[0]); 1471 } 1472 #endif 1473 1474 /* Leaf level of the tree is balanced (end of balance_leaf) */ 
	return 0;
}

/* Make empty node */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	/* zero item count, free space = whole usable area */
	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0;	/* Endian safe if 0 */
}

/*
 * Get first empty buffer: take the first available pre-allocated
 * buffer from tb->FEB[], format it as an empty node, and move it to
 * tb->used[].  Panics if fix_nodes left no free buffers.
 */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_info bi;

	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != NULL)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");

	buffer_info_init_bh(tb, &bi, tb->FEB[i]);
	make_empty_node(&bi);
	set_buffer_uptodate(tb->FEB[i]);
	tb->used[i] = tb->FEB[i];
	tb->FEB[i] = NULL;

	return tb->used[i];
}

/* This is now used because reiserfs_free_block has to be able to schedule. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb, "reiserfs-12320",
				 "called with dirty buffer");
	/* stash bh in the first free slot; free_thrown() drains the list */
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh);	/* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "reiserfs-12321",
			 "too many thrown buffers");
}

/*
 * Release every buffer stashed by store_thrown() and free its block —
 * done after balancing, when it is safe to schedule.
 */
static void free_thrown(struct tree_balance *tb)
{
	int i;
	b_blocknr_t blocknr;
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
		if (tb->thrown[i]) {
			blocknr = tb->thrown[i]->b_blocknr;
			if (buffer_dirty(tb->thrown[i]))
				reiserfs_warning(tb->tb_sb, "reiserfs-12322",
						 "called with dirty buffer %d",
						 blocknr);
			brelse(tb->thrown[i]);	/* incremented in store_thrown */
			reiserfs_free_block(tb->transaction_handle, NULL,
					    blocknr, 0);
		}
	}
}

/*
 * Mark a node as no longer part of the tree (level = FREE_LEVEL, no
 * items) and queue it for freeing once balancing completes.
 */
void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
{
	struct block_head *blkh;
	blkh = B_BLK_HEAD(bh);
	set_blkh_level(blkh, FREE_LEVEL);
	set_blkh_nr_item(blkh, 0);

	clear_buffer_dirty(bh);
	store_thrown(tb, bh);
}

/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
		 struct buffer_head *src, int n_src)
{

	RFALSE(dest == NULL || src == NULL,
	       "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
	       src, dest);
	RFALSE(!B_IS_KEYS_LEVEL(dest),
	       "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
	       dest);
	RFALSE(n_dest < 0 || n_src < 0,
	       "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
	RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
	       "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
	       n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));

	if (B_IS_ITEMS_LEVEL(src))
		/* source buffer contains leaf node */
		memcpy(internal_key(dest, n_dest), item_head(src, n_src),
		       KEY_SIZE);
	else
		memcpy(internal_key(dest, n_dest), internal_key(src, n_src),
		       KEY_SIZE);

	do_balance_mark_internal_dirty(tb, dest, 0);
}

/*
 * Position of the left neighbor's child pointer in the common parent
 * FL[h]: last child of FL[h] when S[h] is its parent's 0-th child,
 * otherwise the slot just left of S[h].
 */
int get_left_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
	       "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));

	if (Sh_position == 0)
		return B_NR_ITEMS(tb->FL[h]);
	else
		return Sh_position - 1;
}

/*
 * Position of the right neighbor's child pointer in FR[h]: 0-th child
 * of FR[h] when S[h] is its parent's last child, otherwise the slot
 * just right of S[h].
 */
int get_right_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
	       "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);

	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
		return 0;
	else
		return Sh_position + 1;
}

#ifdef CONFIG_REISERFS_CHECK

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
/*
 * Debug check: an internal node on the balancing path must be dirty or
 * journaled, and every child pointer it holds must reference a block
 * that is in use.
 */
static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);
	dc = B_N_CHILD(bh, 0);

	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s, "PAP-12338",
				       "invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}

/* Debug check: warn and return 1 if @bh is locked (and not journal
 * prepared) or has fallen out of the tree. */
static int locked_or_not_in_tree(struct tree_balance *tb,
				 struct buffer_head *bh, char *which)
{
	if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
	    !B_IS_IN_TREE(bh)) {
		reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
		return 1;
	}
	return 0;
}

/*
 * Debug check run before balancing starts: verify no other balance is
 * in flight and that every buffer we are about to modify is usable.
 * Returns non-zero if any buffer fails the check.
 */
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (REISERFS_SB(tb->tb_sb)->cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
			       "occurred based on cur_tb not being null at "
			       "this point in code. do_balance cannot properly "
			       "handle concurrent tree accesses on a same "
			       "mount point.");
	}

	/*
	 * double check that buffers that we will modify are unlocked.
	 * (fix_nodes should already have prepped all of these for us).
	 */
	if (tb->lnum[0]) {
		retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
					"S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}

/*
 * Debug check run after the leaf level is balanced: the free space
 * recorded in each modified leaf must agree with the dc_size its
 * parent holds for it; panic on any mismatch.
 */
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb, "PAP-12355",
				       "shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb, "PAP-12360",
				       "shift to right was incorrect");
		}
	}
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb, "reiserfs-12363",
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
	}
}

/* Debug check: validate every leaf touched by this balance. */
static void check_leaf_level(struct tree_balance *tb)
{
	check_leaf(tb->L[0]);
	check_leaf(tb->R[0]);
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
}

/* Debug check: validate every internal node touched by this balance. */
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}

}

#endif

/*
 * Now we have all of the buffers that must be used in balancing of
 * the tree.  We rely on the assumption that schedule() will not occur
 * while do_balance works. ( Only interrupt handlers are acceptable.)
 * We balance the tree according to the analysis made before this,
 * using buffers already obtained.  For SMP support it will someday be
 * necessary to add ordered locking of tb.
 */

/*
 * Some interesting rules of balancing:
 * we delete a maximum of two nodes per level per balancing: we never
 * delete R, when we delete two of three nodes L, S, R then we move
 * them into R.
 *
 * we only delete L if we are deleting two nodes, if we delete only
 * one node we delete S
 *
 * if we shift leaves then we shift as much as we can: this is a
 * deliberate policy of extremism in node packing which results in
 * higher average utilization after repeated random balance operations
 * at the cost of more memory copies and more balancing as a result of
 * small insertions to full nodes.
 *
 * if we shift internal nodes we try to evenly balance the node
 * utilization, with consequent less balancing at the cost of lower
 * utilization.
 *
 * one could argue that the policy for directories in leaves should be
 * that of internal nodes, but we will wait until another day to
 * evaluate this....  It would be nice to someday measure and prove
 * these assumptions as to what is optimal....
 */

/* Pre-balance sanity checks; with CONFIG_REISERFS_CHECK also records
 * this tb as the single balance in progress. */
static inline void do_balance_starts(struct tree_balance *tb)
{
	/* use print_cur_tb() to see initial state of struct tree_balance */

	/* store_print_tb (tb); */

	/* do not delete, just comment it out */
	/*
	   print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
	   tb->tb_path->pos_in_item, tb, "check");
	 */
	RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
	REISERFS_SB(tb->tb_sb)->cur_tb = tb;
#endif
}

/* Post-balance bookkeeping: debug checks, release fixed nodes, and
 * free the blocks queued on the thrown list. */
static inline void do_balance_completed(struct tree_balance *tb)
{

#ifdef CONFIG_REISERFS_CHECK
	check_leaf_level(tb);
	check_internal_levels(tb);
	REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif

	/*
	 * reiserfs_free_block is no longer schedule safe.  So, we need to
	 * put the buffers we want freed on the thrown list during do_balance,
	 * and then free them now
	 */

	REISERFS_SB(tb->tb_sb)->s_do_balance++;

	/* release all nodes hold to perform the balancing */
	unfix_nodes(tb);

	free_thrown(tb);
}

/*
 * do_balance - balance the tree
 *
 * @tb: tree_balance structure
 * @ih: item header of inserted item
 * @body: body of inserted item or bytes to paste
 * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' paste
 *
 * Cut means delete part of an item (includes removing an entry from a
 * directory).
 *
 * Delete means delete whole item.
 *
 * Insert means add a new item into the tree.
 *
 * Paste means to append to the end of an existing file or to
 * insert a directory entry.
 */
void do_balance(struct tree_balance *tb, struct item_head *ih,
		const char *body, int flag)
{
	int child_pos;		/* position of a child node in its parent */
	int h;			/* level of the tree being processed */

	/*
	 * in our processing of one level we sometimes determine what
	 * must be inserted into the next higher level.  This insertion
	 * consists of a key or two keys and their corresponding
	 * pointers
	 */
	struct item_head insert_key[2];

	/* inserted node-ptrs for the next level */
	struct buffer_head *insert_ptr[2];

	tb->tb_mode = flag;
	tb->need_balance_dirty = 0;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
			       "changed");
	}
	/* if we have no real work to do */
	if (!tb->insert_size[0]) {
		reiserfs_warning(tb->tb_sb, "PAP-12350",
				 "insert_size == 0, mode == %c", flag);
		unfix_nodes(tb);
		return;
	}

	atomic_inc(&fs_generation(tb->tb_sb));
	do_balance_starts(tb);

	/*
	 * balance_leaf returns 0 except if combining L R and S into
	 * one node.  see balance_internal() for explanation of this
	 * line of code.
	 */
	child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
	    balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);

#ifdef CONFIG_REISERFS_CHECK
	check_after_balance_leaf(tb);
#endif

	/* Balance internal level of the tree. */
	for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
		child_pos = balance_internal(tb, h, child_pos, insert_key,
					     insert_ptr);

	do_balance_completed(tb);
}