1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * This file is part of UBIFS. 4 * 5 * Copyright (C) 2006-2008 Nokia Corporation. 6 * 7 * Authors: Artem Bityutskiy (Битюцкий Артём) 8 * Adrian Hunter 9 */ 10 11 /* 12 * This file implements UBIFS journal. 13 * 14 * The journal consists of 2 parts - the log and bud LEBs. The log has fixed 15 * length and position, while a bud logical eraseblock is any LEB in the main 16 * area. Buds contain file system data - data nodes, inode nodes, etc. The log 17 * contains only references to buds and some other stuff like commit 18 * start node. The idea is that when we commit the journal, we do 19 * not copy the data, the buds just become indexed. Since after the commit the 20 * nodes in bud eraseblocks become leaf nodes of the file system index tree, we 21 * use term "bud". Analogy is obvious, bud eraseblocks contain nodes which will 22 * become leafs in the future. 23 * 24 * The journal is multi-headed because we want to write data to the journal as 25 * optimally as possible. It is nice to have nodes belonging to the same inode 26 * in one LEB, so we may write data owned by different inodes to different 27 * journal heads, although at present only one data head is used. 28 * 29 * For recovery reasons, the base head contains all inode nodes, all directory 30 * entry nodes and all truncate nodes. This means that the other heads contain 31 * only data nodes. 32 * 33 * Bud LEBs may be half-indexed. For example, if the bud was not full at the 34 * time of commit, the bud is retained to continue to be used in the journal, 35 * even though the "front" of the LEB is now indexed. In that case, the log 36 * reference contains the offset where the bud starts for the purposes of the 37 * journal. 38 * 39 * The journal size has to be limited, because the larger is the journal, the 40 * longer it takes to mount UBIFS (scanning the journal) and the more memory it 41 * takes (indexing in the TNC). 42 * 43 * All the journal write operations like 'ubifs_jnl_update()' here, which write 44 * multiple UBIFS nodes to the journal at one go, are atomic with respect to 45 * unclean reboots. Should the unclean reboot happen, the recovery code drops 46 * all the nodes. 47 */ 48 49 #include "ubifs.h" 50 51 /** 52 * zero_ino_node_unused - zero out unused fields of an on-flash inode node. 53 * @ino: the inode to zero out 54 */ 55 static inline void zero_ino_node_unused(struct ubifs_ino_node *ino) 56 { 57 memset(ino->padding1, 0, 4); 58 memset(ino->padding2, 0, 26); 59 } 60 61 /** 62 * zero_dent_node_unused - zero out unused fields of an on-flash directory 63 * entry node. 64 * @dent: the directory entry to zero out 65 */ 66 static inline void zero_dent_node_unused(struct ubifs_dent_node *dent) 67 { 68 dent->padding1 = 0; 69 } 70 71 /** 72 * zero_trun_node_unused - zero out unused fields of an on-flash truncation 73 * node. 74 * @trun: the truncation node to zero out 75 */ 76 static inline void zero_trun_node_unused(struct ubifs_trun_node *trun) 77 { 78 memset(trun->padding, 0, 12); 79 } 80 81 static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum) 82 { 83 if (ubifs_authenticated(c)) 84 ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c)); 85 } 86 87 /** 88 * reserve_space - reserve space in the journal. 89 * @c: UBIFS file-system description object 90 * @jhead: journal head number 91 * @len: node length 92 * 93 * This function reserves space in journal head @head. If the reservation 94 * succeeded, the journal head stays locked and later has to be unlocked using 95 * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to 96 * be done, and other negative error codes in case of other failures. 97 */ 98 static int reserve_space(struct ubifs_info *c, int jhead, int len) 99 { 100 int err = 0, err1, retries = 0, avail, lnum, offs, squeeze; 101 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; 102 103 /* 104 * Typically, the base head has smaller nodes written to it, so it is 105 * better to try to allocate space at the ends of eraseblocks. This is 106 * what the squeeze parameter does. 107 */ 108 ubifs_assert(c, !c->ro_media && !c->ro_mount); 109 squeeze = (jhead == BASEHD); 110 again: 111 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); 112 113 if (c->ro_error) { 114 err = -EROFS; 115 goto out_unlock; 116 } 117 118 avail = c->leb_size - wbuf->offs - wbuf->used; 119 if (wbuf->lnum != -1 && avail >= len) 120 return 0; 121 122 /* 123 * Write buffer wasn't seek'ed or there is no enough space - look for an 124 * LEB with some empty space. 125 */ 126 lnum = ubifs_find_free_space(c, len, &offs, squeeze); 127 if (lnum >= 0) 128 goto out; 129 130 err = lnum; 131 if (err != -ENOSPC) 132 goto out_unlock; 133 134 /* 135 * No free space, we have to run garbage collector to make 136 * some. But the write-buffer mutex has to be unlocked because 137 * GC also takes it. 138 */ 139 dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead)); 140 mutex_unlock(&wbuf->io_mutex); 141 142 lnum = ubifs_garbage_collect(c, 0); 143 if (lnum < 0) { 144 err = lnum; 145 if (err != -ENOSPC) 146 return err; 147 148 /* 149 * GC could not make a free LEB. But someone else may 150 * have allocated new bud for this journal head, 151 * because we dropped @wbuf->io_mutex, so try once 152 * again. 153 */ 154 dbg_jnl("GC couldn't make a free LEB for jhead %s", 155 dbg_jhead(jhead)); 156 if (retries++ < 2) { 157 dbg_jnl("retry (%d)", retries); 158 goto again; 159 } 160 161 dbg_jnl("return -ENOSPC"); 162 return err; 163 } 164 165 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); 166 dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead)); 167 avail = c->leb_size - wbuf->offs - wbuf->used; 168 169 if (wbuf->lnum != -1 && avail >= len) { 170 /* 171 * Someone else has switched the journal head and we have 172 * enough space now. This happens when more than one process is 173 * trying to write to the same journal head at the same time. 174 */ 175 dbg_jnl("return LEB %d back, already have LEB %d:%d", 176 lnum, wbuf->lnum, wbuf->offs + wbuf->used); 177 err = ubifs_return_leb(c, lnum); 178 if (err) 179 goto out_unlock; 180 return 0; 181 } 182 183 offs = 0; 184 185 out: 186 /* 187 * Make sure we synchronize the write-buffer before we add the new bud 188 * to the log. Otherwise we may have a power cut after the log 189 * reference node for the last bud (@lnum) is written but before the 190 * write-buffer data are written to the next-to-last bud 191 * (@wbuf->lnum). And the effect would be that the recovery would see 192 * that there is corruption in the next-to-last bud. 193 */ 194 err = ubifs_wbuf_sync_nolock(wbuf); 195 if (err) 196 goto out_return; 197 err = ubifs_add_bud_to_log(c, jhead, lnum, offs); 198 if (err) 199 goto out_return; 200 err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs); 201 if (err) 202 goto out_unlock; 203 204 return 0; 205 206 out_unlock: 207 mutex_unlock(&wbuf->io_mutex); 208 return err; 209 210 out_return: 211 /* An error occurred and the LEB has to be returned to lprops */ 212 ubifs_assert(c, err < 0); 213 err1 = ubifs_return_leb(c, lnum); 214 if (err1 && err == -EAGAIN) 215 /* 216 * Return original error code only if it is not %-EAGAIN, 217 * which is not really an error. Otherwise, return the error 218 * code of 'ubifs_return_leb()'. 219 */ 220 err = err1; 221 mutex_unlock(&wbuf->io_mutex); 222 return err; 223 } 224 225 static int ubifs_hash_nodes(struct ubifs_info *c, void *node, 226 int len, struct shash_desc *hash) 227 { 228 int auth_node_size = ubifs_auth_node_sz(c); 229 int err; 230 231 while (1) { 232 const struct ubifs_ch *ch = node; 233 int nodelen = le32_to_cpu(ch->len); 234 235 ubifs_assert(c, len >= auth_node_size); 236 237 if (len == auth_node_size) 238 break; 239 240 ubifs_assert(c, len > nodelen); 241 ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC)); 242 243 err = ubifs_shash_update(c, hash, (void *)node, nodelen); 244 if (err) 245 return err; 246 247 node += ALIGN(nodelen, 8); 248 len -= ALIGN(nodelen, 8); 249 } 250 251 return ubifs_prepare_auth_node(c, node, hash); 252 } 253 254 /** 255 * write_head - write data to a journal head. 256 * @c: UBIFS file-system description object 257 * @jhead: journal head 258 * @buf: buffer to write 259 * @len: length to write 260 * @lnum: LEB number written is returned here 261 * @offs: offset written is returned here 262 * @sync: non-zero if the write-buffer has to by synchronized 263 * 264 * This function writes data to the reserved space of journal head @jhead. 265 * Returns zero in case of success and a negative error code in case of 266 * failure. 267 */ 268 static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, 269 int *lnum, int *offs, int sync) 270 { 271 int err; 272 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; 273 274 ubifs_assert(c, jhead != GCHD); 275 276 *lnum = c->jheads[jhead].wbuf.lnum; 277 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; 278 dbg_jnl("jhead %s, LEB %d:%d, len %d", 279 dbg_jhead(jhead), *lnum, *offs, len); 280 281 if (ubifs_authenticated(c)) { 282 err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash); 283 if (err) 284 return err; 285 } 286 287 err = ubifs_wbuf_write_nolock(wbuf, buf, len); 288 if (err) 289 return err; 290 if (sync) 291 err = ubifs_wbuf_sync_nolock(wbuf); 292 return err; 293 } 294 295 /** 296 * __queue_and_wait - queue a task and wait until the task is waked up. 297 * @c: UBIFS file-system description object 298 * 299 * This function adds current task in queue and waits until the task is waked 300 * up. This function should be called with @c->reserve_space_wq locked. 301 */ 302 static void __queue_and_wait(struct ubifs_info *c) 303 { 304 DEFINE_WAIT(wait); 305 306 __add_wait_queue_entry_tail_exclusive(&c->reserve_space_wq, &wait); 307 set_current_state(TASK_UNINTERRUPTIBLE); 308 spin_unlock(&c->reserve_space_wq.lock); 309 310 schedule(); 311 finish_wait(&c->reserve_space_wq, &wait); 312 } 313 314 /** 315 * wait_for_reservation - try queuing current task to wait until waked up. 316 * @c: UBIFS file-system description object 317 * 318 * This function queues current task to wait until waked up, if queuing is 319 * started(@c->need_wait_space is not %0). Returns %true if current task is 320 * added in queue, otherwise %false is returned. 321 */ 322 static bool wait_for_reservation(struct ubifs_info *c) 323 { 324 if (likely(atomic_read(&c->need_wait_space) == 0)) 325 /* Quick path to check whether queuing is started. */ 326 return false; 327 328 spin_lock(&c->reserve_space_wq.lock); 329 if (atomic_read(&c->need_wait_space) == 0) { 330 /* Queuing is not started, don't queue current task. */ 331 spin_unlock(&c->reserve_space_wq.lock); 332 return false; 333 } 334 335 __queue_and_wait(c); 336 return true; 337 } 338 339 /** 340 * wake_up_reservation - wake up first task in queue or stop queuing. 341 * @c: UBIFS file-system description object 342 * 343 * This function wakes up the first task in queue if it exists, or stops 344 * queuing if no tasks in queue. 345 */ 346 static void wake_up_reservation(struct ubifs_info *c) 347 { 348 spin_lock(&c->reserve_space_wq.lock); 349 if (waitqueue_active(&c->reserve_space_wq)) 350 wake_up_locked(&c->reserve_space_wq); 351 else 352 /* 353 * Compared with wait_for_reservation(), set @c->need_wait_space 354 * under the protection of wait queue lock, which can avoid that 355 * @c->need_wait_space is set to 0 after new task queued. 356 */ 357 atomic_set(&c->need_wait_space, 0); 358 spin_unlock(&c->reserve_space_wq.lock); 359 } 360 361 /** 362 * add_or_start_queue - add current task in queue or start queuing. 363 * @c: UBIFS file-system description object 364 * 365 * This function starts queuing if queuing is not started, otherwise adds 366 * current task in queue. 367 */ 368 static void add_or_start_queue(struct ubifs_info *c) 369 { 370 spin_lock(&c->reserve_space_wq.lock); 371 if (atomic_cmpxchg(&c->need_wait_space, 0, 1) == 0) { 372 /* Starts queuing, task can go on directly. */ 373 spin_unlock(&c->reserve_space_wq.lock); 374 return; 375 } 376 377 /* 378 * There are at least two tasks have retried more than 32 times 379 * at certain point, first task has started queuing, just queue 380 * the left tasks. 381 */ 382 __queue_and_wait(c); 383 } 384 385 /** 386 * make_reservation - reserve journal space. 387 * @c: UBIFS file-system description object 388 * @jhead: journal head 389 * @len: how many bytes to reserve 390 * 391 * This function makes space reservation in journal head @jhead. The function 392 * takes the commit lock and locks the journal head, and the caller has to 393 * unlock the head and finish the reservation with 'finish_reservation()'. 394 * Returns zero in case of success and a negative error code in case of 395 * failure. 396 * 397 * Note, the journal head may be unlocked as soon as the data is written, while 398 * the commit lock has to be released after the data has been added to the 399 * TNC. 400 */ 401 static int make_reservation(struct ubifs_info *c, int jhead, int len) 402 { 403 int err, cmt_retries = 0, nospc_retries = 0; 404 bool blocked = wait_for_reservation(c); 405 406 again: 407 down_read(&c->commit_sem); 408 err = reserve_space(c, jhead, len); 409 if (!err) { 410 /* c->commit_sem will get released via finish_reservation(). */ 411 goto out_wake_up; 412 } 413 up_read(&c->commit_sem); 414 415 if (err == -ENOSPC) { 416 /* 417 * GC could not make any progress. We should try to commit 418 * because it could make some dirty space and GC would make 419 * progress, so make the error -EAGAIN so that the below 420 * will commit and re-try. 421 */ 422 nospc_retries++; 423 dbg_jnl("no space, retry"); 424 err = -EAGAIN; 425 } 426 427 if (err != -EAGAIN) 428 goto out; 429 430 /* 431 * -EAGAIN means that the journal is full or too large, or the above 432 * code wants to do one commit. Do this and re-try. 433 */ 434 if (cmt_retries > 128) { 435 /* 436 * This should not happen unless: 437 * 1. The journal size limitations are too tough. 438 * 2. The budgeting is incorrect. We always have to be able to 439 * write to the media, because all operations are budgeted. 440 * Deletions are not budgeted, though, but we reserve an 441 * extra LEB for them. 442 */ 443 ubifs_err(c, "stuck in space allocation, nospc_retries %d", 444 nospc_retries); 445 err = -ENOSPC; 446 goto out; 447 } else if (cmt_retries > 32) { 448 /* 449 * It's almost impossible to happen, unless there are many tasks 450 * making reservation concurrently and someone task has retried 451 * gc + commit for many times, generated available space during 452 * this period are grabbed by other tasks. 453 * But if it happens, start queuing up all tasks that will make 454 * space reservation, then there is only one task making space 455 * reservation at any time, and it can always make success under 456 * the premise of correct budgeting. 457 */ 458 ubifs_warn(c, "too many space allocation cmt_retries (%d) " 459 "nospc_retries (%d), start queuing tasks", 460 cmt_retries, nospc_retries); 461 462 if (!blocked) { 463 blocked = true; 464 add_or_start_queue(c); 465 } 466 } 467 468 dbg_jnl("-EAGAIN, commit and retry (retried %d times)", 469 cmt_retries); 470 cmt_retries += 1; 471 472 err = ubifs_run_commit(c); 473 if (err) 474 goto out_wake_up; 475 goto again; 476 477 out: 478 ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d", 479 len, jhead, err); 480 if (err == -ENOSPC) { 481 /* This are some budgeting problems, print useful information */ 482 down_write(&c->commit_sem); 483 dump_stack(); 484 ubifs_dump_budg(c, &c->bi); 485 ubifs_dump_lprops(c); 486 cmt_retries = dbg_check_lprops(c); 487 up_write(&c->commit_sem); 488 } 489 out_wake_up: 490 if (blocked) { 491 /* 492 * Only tasks that have ever started queuing or ever been queued 493 * can wake up other queued tasks, which can make sure that 494 * there is only one task waked up to make space reservation. 495 * For example: 496 * task A task B task C 497 * make_reservation make_reservation 498 * reserve_space // 0 499 * wake_up_reservation 500 * atomic_cmpxchg // 0, start queuing 501 * reserve_space 502 * wait_for_reservation 503 * __queue_and_wait 504 * add_wait_queue 505 * if (blocked) // false 506 * // So that task C won't be waked up to race with task B 507 */ 508 wake_up_reservation(c); 509 } 510 return err; 511 } 512 513 /** 514 * release_head - release a journal head. 515 * @c: UBIFS file-system description object 516 * @jhead: journal head 517 * 518 * This function releases journal head @jhead which was locked by 519 * the 'make_reservation()' function. It has to be called after each successful 520 * 'make_reservation()' invocation. 521 */ 522 static inline void release_head(struct ubifs_info *c, int jhead) 523 { 524 mutex_unlock(&c->jheads[jhead].wbuf.io_mutex); 525 } 526 527 /** 528 * finish_reservation - finish a reservation. 529 * @c: UBIFS file-system description object 530 * 531 * This function finishes journal space reservation. It must be called after 532 * 'make_reservation()'. 533 */ 534 static void finish_reservation(struct ubifs_info *c) 535 { 536 up_read(&c->commit_sem); 537 } 538 539 /** 540 * get_dent_type - translate VFS inode mode to UBIFS directory entry type. 541 * @mode: inode mode 542 */ 543 static int get_dent_type(int mode) 544 { 545 switch (mode & S_IFMT) { 546 case S_IFREG: 547 return UBIFS_ITYPE_REG; 548 case S_IFDIR: 549 return UBIFS_ITYPE_DIR; 550 case S_IFLNK: 551 return UBIFS_ITYPE_LNK; 552 case S_IFBLK: 553 return UBIFS_ITYPE_BLK; 554 case S_IFCHR: 555 return UBIFS_ITYPE_CHR; 556 case S_IFIFO: 557 return UBIFS_ITYPE_FIFO; 558 case S_IFSOCK: 559 return UBIFS_ITYPE_SOCK; 560 default: 561 BUG(); 562 } 563 return 0; 564 } 565 566 /** 567 * pack_inode - pack an inode node. 568 * @c: UBIFS file-system description object 569 * @ino: buffer in which to pack inode node 570 * @inode: inode to pack 571 * @last: indicates the last node of the group 572 */ 573 static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino, 574 const struct inode *inode, int last) 575 { 576 int data_len = 0, last_reference = !inode->i_nlink; 577 struct ubifs_inode *ui = ubifs_inode(inode); 578 579 ino->ch.node_type = UBIFS_INO_NODE; 580 ino_key_init_flash(c, &ino->key, inode->i_ino); 581 ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum); 582 ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode)); 583 ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode)); 584 ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode)); 585 ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); 586 ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode)); 587 ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode)); 588 ino->uid = cpu_to_le32(i_uid_read(inode)); 589 ino->gid = cpu_to_le32(i_gid_read(inode)); 590 ino->mode = cpu_to_le32(inode->i_mode); 591 ino->flags = cpu_to_le32(ui->flags); 592 ino->size = cpu_to_le64(ui->ui_size); 593 ino->nlink = cpu_to_le32(inode->i_nlink); 594 ino->compr_type = cpu_to_le16(ui->compr_type); 595 ino->data_len = cpu_to_le32(ui->data_len); 596 ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt); 597 ino->xattr_size = cpu_to_le32(ui->xattr_size); 598 ino->xattr_names = cpu_to_le32(ui->xattr_names); 599 zero_ino_node_unused(ino); 600 601 /* 602 * Drop the attached data if this is a deletion inode, the data is not 603 * needed anymore. 604 */ 605 if (!last_reference) { 606 memcpy(ino->data, ui->data, ui->data_len); 607 data_len = ui->data_len; 608 } 609 610 ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last); 611 } 612 613 /** 614 * mark_inode_clean - mark UBIFS inode as clean. 615 * @c: UBIFS file-system description object 616 * @ui: UBIFS inode to mark as clean 617 * 618 * This helper function marks UBIFS inode @ui as clean by cleaning the 619 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the 620 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would 621 * just do nothing. 622 */ 623 static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui) 624 { 625 if (ui->dirty) 626 ubifs_release_dirty_inode_budget(c, ui); 627 ui->dirty = 0; 628 } 629 630 static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent) 631 { 632 if (c->double_hash) 633 dent->cookie = (__force __le32) get_random_u32(); 634 else 635 dent->cookie = 0; 636 } 637 638 /** 639 * ubifs_jnl_update - update inode. 640 * @c: UBIFS file-system description object 641 * @dir: parent inode or host inode in case of extended attributes 642 * @nm: directory entry name 643 * @inode: inode to update 644 * @deletion: indicates a directory entry deletion i.e unlink or rmdir 645 * @xent: non-zero if the directory entry is an extended attribute entry 646 * @in_orphan: indicates whether the @inode is in orphan list 647 * 648 * This function updates an inode by writing a directory entry (or extended 649 * attribute entry), the inode itself, and the parent directory inode (or the 650 * host inode) to the journal. 651 * 652 * The function writes the host inode @dir last, which is important in case of 653 * extended attributes. Indeed, then we guarantee that if the host inode gets 654 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed, 655 * the extended attribute inode gets flushed too. And this is exactly what the 656 * user expects - synchronizing the host inode synchronizes its extended 657 * attributes. Similarly, this guarantees that if @dir is synchronized, its 658 * directory entry corresponding to @nm gets synchronized too. 659 * 660 * If the inode (@inode) or the parent directory (@dir) are synchronous, this 661 * function synchronizes the write-buffer. 662 * 663 * This function marks the @dir and @inode inodes as clean and returns zero on 664 * success. In case of failure, a negative error code is returned. 665 */ 666 int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, 667 const struct fscrypt_name *nm, const struct inode *inode, 668 int deletion, int xent, int in_orphan) 669 { 670 int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0; 671 int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); 672 int last_reference = !!(deletion && inode->i_nlink == 0); 673 struct ubifs_inode *ui = ubifs_inode(inode); 674 struct ubifs_inode *host_ui = ubifs_inode(dir); 675 struct ubifs_dent_node *dent; 676 struct ubifs_ino_node *ino; 677 union ubifs_key dent_key, ino_key; 678 u8 hash_dent[UBIFS_HASH_ARR_SZ]; 679 u8 hash_ino[UBIFS_HASH_ARR_SZ]; 680 u8 hash_ino_host[UBIFS_HASH_ARR_SZ]; 681 682 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 683 684 dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1; 685 ilen = UBIFS_INO_NODE_SZ; 686 687 /* 688 * If the last reference to the inode is being deleted, then there is 689 * no need to attach and write inode data, it is being deleted anyway. 690 * And if the inode is being deleted, no need to synchronize 691 * write-buffer even if the inode is synchronous. 692 */ 693 if (!last_reference) { 694 ilen += ui->data_len; 695 sync |= IS_SYNC(inode); 696 } 697 698 aligned_dlen = ALIGN(dlen, 8); 699 aligned_ilen = ALIGN(ilen, 8); 700 701 len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; 702 /* Make sure to also account for extended attributes */ 703 if (ubifs_authenticated(c)) 704 len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c); 705 else 706 len += host_ui->data_len; 707 708 dent = kzalloc(len, GFP_NOFS); 709 if (!dent) 710 return -ENOMEM; 711 712 /* Make reservation before allocating sequence numbers */ 713 err = make_reservation(c, BASEHD, len); 714 if (err) 715 goto out_free; 716 717 if (!xent) { 718 dent->ch.node_type = UBIFS_DENT_NODE; 719 if (fname_name(nm) == NULL) 720 dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash); 721 else 722 dent_key_init(c, &dent_key, dir->i_ino, nm); 723 } else { 724 dent->ch.node_type = UBIFS_XENT_NODE; 725 xent_key_init(c, &dent_key, dir->i_ino, nm); 726 } 727 728 key_write(c, &dent_key, dent->key); 729 dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino); 730 dent->type = get_dent_type(inode->i_mode); 731 dent->nlen = cpu_to_le16(fname_len(nm)); 732 memcpy(dent->name, fname_name(nm), fname_len(nm)); 733 dent->name[fname_len(nm)] = '\0'; 734 set_dent_cookie(c, dent); 735 736 zero_dent_node_unused(dent); 737 ubifs_prep_grp_node(c, dent, dlen, 0); 738 err = ubifs_node_calc_hash(c, dent, hash_dent); 739 if (err) 740 goto out_release; 741 742 ino = (void *)dent + aligned_dlen; 743 pack_inode(c, ino, inode, 0); 744 err = ubifs_node_calc_hash(c, ino, hash_ino); 745 if (err) 746 goto out_release; 747 748 ino = (void *)ino + aligned_ilen; 749 pack_inode(c, ino, dir, 1); 750 err = ubifs_node_calc_hash(c, ino, hash_ino_host); 751 if (err) 752 goto out_release; 753 754 if (last_reference && !in_orphan) { 755 err = ubifs_add_orphan(c, inode->i_ino); 756 if (err) { 757 release_head(c, BASEHD); 758 goto out_finish; 759 } 760 ui->del_cmtno = c->cmt_no; 761 orphan_added = 1; 762 } 763 764 err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); 765 if (err) 766 goto out_release; 767 if (!sync) { 768 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 769 770 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino); 771 ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino); 772 } 773 release_head(c, BASEHD); 774 kfree(dent); 775 ubifs_add_auth_dirt(c, lnum); 776 777 if (deletion) { 778 if (fname_name(nm) == NULL) 779 err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash); 780 else 781 err = ubifs_tnc_remove_nm(c, &dent_key, nm); 782 if (err) 783 goto out_ro; 784 err = ubifs_add_dirt(c, lnum, dlen); 785 } else 786 err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, 787 hash_dent, nm); 788 if (err) 789 goto out_ro; 790 791 /* 792 * Note, we do not remove the inode from TNC even if the last reference 793 * to it has just been deleted, because the inode may still be opened. 794 * Instead, the inode has been added to orphan lists and the orphan 795 * subsystem will take further care about it. 796 */ 797 ino_key_init(c, &ino_key, inode->i_ino); 798 ino_offs = dent_offs + aligned_dlen; 799 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino); 800 if (err) 801 goto out_ro; 802 803 ino_key_init(c, &ino_key, dir->i_ino); 804 ino_offs += aligned_ilen; 805 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, 806 UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host); 807 if (err) 808 goto out_ro; 809 810 if (in_orphan && inode->i_nlink) 811 ubifs_delete_orphan(c, inode->i_ino); 812 813 finish_reservation(c); 814 spin_lock(&ui->ui_lock); 815 ui->synced_i_size = ui->ui_size; 816 spin_unlock(&ui->ui_lock); 817 if (xent) { 818 spin_lock(&host_ui->ui_lock); 819 host_ui->synced_i_size = host_ui->ui_size; 820 spin_unlock(&host_ui->ui_lock); 821 } 822 mark_inode_clean(c, ui); 823 mark_inode_clean(c, host_ui); 824 return 0; 825 826 out_finish: 827 finish_reservation(c); 828 out_free: 829 kfree(dent); 830 return err; 831 832 out_release: 833 release_head(c, BASEHD); 834 kfree(dent); 835 out_ro: 836 ubifs_ro_mode(c, err); 837 if (orphan_added) 838 ubifs_delete_orphan(c, inode->i_ino); 839 finish_reservation(c); 840 return err; 841 } 842 843 /** 844 * ubifs_jnl_write_data - write a data node to the journal. 845 * @c: UBIFS file-system description object 846 * @inode: inode the data node belongs to 847 * @key: node key 848 * @buf: buffer to write 849 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE) 850 * 851 * This function writes a data node to the journal. Returns %0 if the data node 852 * was successfully written, and a negative error code in case of failure. 853 */ 854 int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, 855 const union ubifs_key *key, const void *buf, int len) 856 { 857 struct ubifs_data_node *data; 858 int err, lnum, offs, compr_type, out_len, compr_len, auth_len; 859 int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1; 860 int write_len; 861 struct ubifs_inode *ui = ubifs_inode(inode); 862 bool encrypted = IS_ENCRYPTED(inode); 863 u8 hash[UBIFS_HASH_ARR_SZ]; 864 865 dbg_jnlk(key, "ino %lu, blk %u, len %d, key ", 866 (unsigned long)key_inum(c, key), key_block(c, key), len); 867 ubifs_assert(c, len <= UBIFS_BLOCK_SIZE); 868 869 if (encrypted) 870 dlen += UBIFS_CIPHER_BLOCK_SIZE; 871 872 auth_len = ubifs_auth_node_sz(c); 873 874 data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN); 875 if (!data) { 876 /* 877 * Fall-back to the write reserve buffer. Note, we might be 878 * currently on the memory reclaim path, when the kernel is 879 * trying to free some memory by writing out dirty pages. The 880 * write reserve buffer helps us to guarantee that we are 881 * always able to write the data. 882 */ 883 allocated = 0; 884 mutex_lock(&c->write_reserve_mutex); 885 data = c->write_reserve_buf; 886 } 887 888 data->ch.node_type = UBIFS_DATA_NODE; 889 key_write(c, key, &data->key); 890 data->size = cpu_to_le32(len); 891 892 if (!(ui->flags & UBIFS_COMPR_FL)) 893 /* Compression is disabled for this inode */ 894 compr_type = UBIFS_COMPR_NONE; 895 else 896 compr_type = ui->compr_type; 897 898 out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ; 899 ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type); 900 ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE); 901 902 if (encrypted) { 903 err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key)); 904 if (err) 905 goto out_free; 906 907 } else { 908 data->compr_size = 0; 909 out_len = compr_len; 910 } 911 912 dlen = UBIFS_DATA_NODE_SZ + out_len; 913 if (ubifs_authenticated(c)) 914 write_len = ALIGN(dlen, 8) + auth_len; 915 else 916 write_len = dlen; 917 918 data->compr_type = cpu_to_le16(compr_type); 919 920 /* Make reservation before allocating sequence numbers */ 921 err = make_reservation(c, DATAHD, write_len); 922 if (err) 923 goto out_free; 924 925 ubifs_prepare_node(c, data, dlen, 0); 926 err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0); 927 if (err) 928 goto out_release; 929 930 err = ubifs_node_calc_hash(c, data, hash); 931 if (err) 932 goto out_release; 933 934 ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key)); 935 release_head(c, DATAHD); 936 937 ubifs_add_auth_dirt(c, lnum); 938 939 err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash); 940 if (err) 941 goto out_ro; 942 943 finish_reservation(c); 944 if (!allocated) 945 mutex_unlock(&c->write_reserve_mutex); 946 else 947 kfree(data); 948 return 0; 949 950 out_release: 951 release_head(c, DATAHD); 952 out_ro: 953 ubifs_ro_mode(c, err); 954 finish_reservation(c); 955 out_free: 956 if (!allocated) 957 mutex_unlock(&c->write_reserve_mutex); 958 else 959 kfree(data); 960 return err; 961 } 962 963 /** 964 * ubifs_jnl_write_inode - flush inode to the journal. 965 * @c: UBIFS file-system description object 966 * @inode: inode to flush 967 * 968 * This function writes inode @inode to the journal. If the inode is 969 * synchronous, it also synchronizes the write-buffer. Returns zero in case of 970 * success and a negative error code in case of failure. 971 */ 972 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) 973 { 974 int err, lnum, offs; 975 struct ubifs_ino_node *ino, *ino_start; 976 struct ubifs_inode *ui = ubifs_inode(inode); 977 int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ; 978 int last_reference = !inode->i_nlink; 979 int kill_xattrs = ui->xattr_cnt && last_reference; 980 u8 hash[UBIFS_HASH_ARR_SZ]; 981 982 dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink); 983 984 if (kill_xattrs && ui->xattr_cnt > ubifs_xattr_max_cnt(c)) { 985 ubifs_err(c, "Cannot delete inode, it has too much xattrs!"); 986 err = -EPERM; 987 ubifs_ro_mode(c, err); 988 return err; 989 } 990 991 /* 992 * If the inode is being deleted, do not write the attached data. No 993 * need to synchronize the write-buffer either. 994 */ 995 if (!last_reference) { 996 ilen += ui->data_len; 997 sync = IS_SYNC(inode); 998 } else if (kill_xattrs) { 999 write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt; 1000 } 1001 1002 if (ubifs_authenticated(c)) 1003 write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c); 1004 else 1005 write_len += ilen; 1006 1007 ino_start = ino = kmalloc(write_len, GFP_NOFS); 1008 if (!ino) 1009 return -ENOMEM; 1010 1011 /* Make reservation before allocating sequence numbers */ 1012 err = make_reservation(c, BASEHD, write_len); 1013 if (err) 1014 goto out_free; 1015 1016 if (kill_xattrs) { 1017 union ubifs_key key; 1018 struct fscrypt_name nm = {0}; 1019 struct inode *xino; 1020 struct ubifs_dent_node *xent, *pxent = NULL; 1021 1022 lowest_xent_key(c, &key, inode->i_ino); 1023 while (1) { 1024 xent = ubifs_tnc_next_ent(c, &key, &nm); 1025 if (IS_ERR(xent)) { 1026 err = PTR_ERR(xent); 1027 if (err == -ENOENT) 1028 break; 1029 1030 kfree(pxent); 1031 goto out_release; 1032 } 1033 1034 fname_name(&nm) = xent->name; 1035 fname_len(&nm) = le16_to_cpu(xent->nlen); 1036 1037 xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum)); 1038 if (IS_ERR(xino)) { 1039 err = PTR_ERR(xino); 1040 ubifs_err(c, "dead directory entry '%s', error %d", 1041 xent->name, err); 1042 ubifs_ro_mode(c, err); 1043 kfree(pxent); 1044 kfree(xent); 1045 goto out_release; 1046 } 1047 ubifs_assert(c, ubifs_inode(xino)->xattr); 1048 1049 clear_nlink(xino); 1050 pack_inode(c, ino, xino, 0); 1051 ino = (void *)ino + UBIFS_INO_NODE_SZ; 1052 iput(xino); 1053 1054 kfree(pxent); 1055 pxent = xent; 1056 key_read(c, &xent->key, &key); 1057 } 1058 kfree(pxent); 1059 } 1060 1061 pack_inode(c, ino, inode, 1); 1062 err = ubifs_node_calc_hash(c, ino, hash); 1063 if (err) 1064 goto out_release; 1065 1066 err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync); 1067 if (err) 1068 goto out_release; 1069 if (!sync) 1070 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1071 inode->i_ino); 1072 release_head(c, BASEHD); 1073 1074 if (last_reference) { 1075 err = ubifs_tnc_remove_ino(c, inode->i_ino); 1076 if (err) 1077 goto out_ro; 1078 ubifs_delete_orphan(c, inode->i_ino); 1079 err = ubifs_add_dirt(c, lnum, write_len); 1080 } else { 1081 union ubifs_key key; 1082 1083 ubifs_add_auth_dirt(c, lnum); 1084 1085 ino_key_init(c, &key, inode->i_ino); 1086 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); 1087 } 1088 if (err) 1089 goto out_ro; 1090 1091 finish_reservation(c); 1092 spin_lock(&ui->ui_lock); 1093 ui->synced_i_size = ui->ui_size; 1094 spin_unlock(&ui->ui_lock); 1095 kfree(ino_start); 1096 return 0; 1097 1098 out_release: 1099 release_head(c, BASEHD); 1100 out_ro: 1101 ubifs_ro_mode(c, err); 1102 finish_reservation(c); 1103 out_free: 1104 kfree(ino_start); 1105 return err; 1106 } 1107 1108 /** 1109 * ubifs_jnl_delete_inode - delete an inode. 1110 * @c: UBIFS file-system description object 1111 * @inode: inode to delete 1112 * 1113 * This function deletes inode @inode which includes removing it from orphans, 1114 * deleting it from TNC and, in some cases, writing a deletion inode to the 1115 * journal. 1116 * 1117 * When regular file inodes are unlinked or a directory inode is removed, the 1118 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and 1119 * direntry to the media, and adds the inode to orphans. After this, when the 1120 * last reference to this inode has been dropped, this function is called. In 1121 * general, it has to write one more deletion inode to the media, because if 1122 * a commit happened between 'ubifs_jnl_update()' and 1123 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal 1124 * anymore, and in fact it might not be on the flash anymore, because it might 1125 * have been garbage-collected already. And for optimization reasons UBIFS does 1126 * not read the orphan area if it has been unmounted cleanly, so it would have 1127 * no indication in the journal that there is a deleted inode which has to be 1128 * removed from TNC. 1129 * 1130 * However, if there was no commit between 'ubifs_jnl_update()' and 1131 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion 1132 * inode to the media for the second time. And this is quite a typical case. 1133 * 1134 * This function returns zero in case of success and a negative error code in 1135 * case of failure. 1136 */ 1137 int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode) 1138 { 1139 int err; 1140 struct ubifs_inode *ui = ubifs_inode(inode); 1141 1142 ubifs_assert(c, inode->i_nlink == 0); 1143 1144 if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no) 1145 /* A commit happened for sure or inode hosts xattrs */ 1146 return ubifs_jnl_write_inode(c, inode); 1147 1148 down_read(&c->commit_sem); 1149 /* 1150 * Check commit number again, because the first test has been done 1151 * without @c->commit_sem, so a commit might have happened. 1152 */ 1153 if (ui->del_cmtno != c->cmt_no) { 1154 up_read(&c->commit_sem); 1155 return ubifs_jnl_write_inode(c, inode); 1156 } 1157 1158 err = ubifs_tnc_remove_ino(c, inode->i_ino); 1159 if (err) 1160 ubifs_ro_mode(c, err); 1161 else 1162 ubifs_delete_orphan(c, inode->i_ino); 1163 up_read(&c->commit_sem); 1164 return err; 1165 } 1166 1167 /** 1168 * ubifs_jnl_xrename - cross rename two directory entries. 1169 * @c: UBIFS file-system description object 1170 * @fst_dir: parent inode of 1st directory entry to exchange 1171 * @fst_inode: 1st inode to exchange 1172 * @fst_nm: name of 1st inode to exchange 1173 * @snd_dir: parent inode of 2nd directory entry to exchange 1174 * @snd_inode: 2nd inode to exchange 1175 * @snd_nm: name of 2nd inode to exchange 1176 * @sync: non-zero if the write-buffer has to be synchronized 1177 * 1178 * This function implements the cross rename operation which may involve 1179 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean 1180 * and returns zero on success. In case of failure, a negative error code is 1181 * returned. 1182 */ 1183 int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, 1184 const struct inode *fst_inode, 1185 const struct fscrypt_name *fst_nm, 1186 const struct inode *snd_dir, 1187 const struct inode *snd_inode, 1188 const struct fscrypt_name *snd_nm, int sync) 1189 { 1190 union ubifs_key key; 1191 struct ubifs_dent_node *dent1, *dent2; 1192 int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ; 1193 int aligned_dlen1, aligned_dlen2; 1194 int twoparents = (fst_dir != snd_dir); 1195 void *p; 1196 u8 hash_dent1[UBIFS_HASH_ARR_SZ]; 1197 u8 hash_dent2[UBIFS_HASH_ARR_SZ]; 1198 u8 hash_p1[UBIFS_HASH_ARR_SZ]; 1199 u8 hash_p2[UBIFS_HASH_ARR_SZ]; 1200 1201 ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0); 1202 ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0); 1203 ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); 1204 ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); 1205 1206 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1; 1207 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1; 1208 aligned_dlen1 = ALIGN(dlen1, 8); 1209 aligned_dlen2 = ALIGN(dlen2, 8); 1210 1211 len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8); 1212 if (twoparents) 1213 len += plen; 1214 1215 len += ubifs_auth_node_sz(c); 1216 1217 dent1 = kzalloc(len, GFP_NOFS); 1218 if (!dent1) 1219 return -ENOMEM; 1220 1221 /* Make reservation before allocating sequence numbers */ 1222 err = make_reservation(c, BASEHD, len); 1223 if (err) 1224 goto out_free; 1225 1226 /* Make new dent for 1st entry */ 1227 dent1->ch.node_type = UBIFS_DENT_NODE; 1228 dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm); 1229 dent1->inum = cpu_to_le64(fst_inode->i_ino); 1230 dent1->type = get_dent_type(fst_inode->i_mode); 1231 dent1->nlen = cpu_to_le16(fname_len(snd_nm)); 1232 memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm)); 1233 dent1->name[fname_len(snd_nm)] = '\0'; 1234 set_dent_cookie(c, dent1); 1235 zero_dent_node_unused(dent1); 1236 ubifs_prep_grp_node(c, dent1, dlen1, 0); 1237 err = ubifs_node_calc_hash(c, dent1, hash_dent1); 1238 if (err) 1239 goto out_release; 1240 1241 /* Make new dent for 2nd entry */ 1242 dent2 = (void *)dent1 + aligned_dlen1; 1243 dent2->ch.node_type = UBIFS_DENT_NODE; 1244 dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm); 1245 dent2->inum = cpu_to_le64(snd_inode->i_ino); 1246 dent2->type = get_dent_type(snd_inode->i_mode); 1247 dent2->nlen = cpu_to_le16(fname_len(fst_nm)); 1248 memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm)); 1249 dent2->name[fname_len(fst_nm)] = '\0'; 1250 set_dent_cookie(c, dent2); 1251 zero_dent_node_unused(dent2); 1252 ubifs_prep_grp_node(c, dent2, dlen2, 0); 1253 err = ubifs_node_calc_hash(c, dent2, hash_dent2); 1254 if (err) 1255 goto out_release; 1256 1257 p = (void *)dent2 + aligned_dlen2; 1258 if (!twoparents) { 1259 pack_inode(c, p, fst_dir, 1); 1260 err = ubifs_node_calc_hash(c, p, hash_p1); 1261 if (err) 1262 goto out_release; 1263 } else { 1264 pack_inode(c, p, fst_dir, 0); 1265 err = ubifs_node_calc_hash(c, p, hash_p1); 1266 if (err) 1267 goto out_release; 1268 p += ALIGN(plen, 8); 1269 pack_inode(c, p, snd_dir, 1); 1270 err = ubifs_node_calc_hash(c, p, hash_p2); 1271 if (err) 1272 goto out_release; 1273 } 1274 1275 err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync); 1276 if (err) 1277 goto out_release; 1278 if (!sync) { 1279 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 1280 1281 ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino); 1282 ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino); 1283 } 1284 release_head(c, BASEHD); 1285 1286 ubifs_add_auth_dirt(c, lnum); 1287 1288 dent_key_init(c, &key, snd_dir->i_ino, snd_nm); 1289 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm); 1290 if (err) 1291 goto out_ro; 1292 1293 offs += aligned_dlen1; 1294 dent_key_init(c, &key, fst_dir->i_ino, fst_nm); 1295 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm); 1296 if (err) 1297 goto out_ro; 1298 1299 offs += aligned_dlen2; 1300 1301 ino_key_init(c, &key, fst_dir->i_ino); 1302 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1); 1303 if (err) 1304 goto out_ro; 1305 1306 if (twoparents) { 1307 offs += ALIGN(plen, 8); 1308 ino_key_init(c, &key, snd_dir->i_ino); 1309 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2); 1310 if (err) 1311 goto out_ro; 1312 } 1313 1314 finish_reservation(c); 1315 1316 mark_inode_clean(c, ubifs_inode(fst_dir)); 1317 if (twoparents) 1318 mark_inode_clean(c, ubifs_inode(snd_dir)); 1319 kfree(dent1); 1320 return 0; 1321 1322 out_release: 1323 release_head(c, BASEHD); 1324 out_ro: 1325 ubifs_ro_mode(c, err); 1326 finish_reservation(c); 1327 out_free: 1328 kfree(dent1); 1329 return err; 1330 } 1331 1332 /** 1333 * ubifs_jnl_rename - rename a directory entry. 1334 * @c: UBIFS file-system description object 1335 * @old_dir: parent inode of directory entry to rename 1336 * @old_inode: directory entry's inode to rename 1337 * @old_nm: name of the old directory entry to rename 1338 * @new_dir: parent inode of directory entry to rename 1339 * @new_inode: new directory entry's inode (or directory entry's inode to 1340 * replace) 1341 * @new_nm: new name of the new directory entry 1342 * @whiteout: whiteout inode 1343 * @sync: non-zero if the write-buffer has to be synchronized 1344 * @delete_orphan: indicates an orphan entry deletion for @whiteout 1345 * 1346 * This function implements the re-name operation which may involve writing up 1347 * to 4 inodes(new inode, whiteout inode, old and new parent directory inodes) 1348 * and 2 directory entries. It marks the written inodes as clean and returns 1349 * zero on success. In case of failure, a negative error code is returned. 1350 */ 1351 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, 1352 const struct inode *old_inode, 1353 const struct fscrypt_name *old_nm, 1354 const struct inode *new_dir, 1355 const struct inode *new_inode, 1356 const struct fscrypt_name *new_nm, 1357 const struct inode *whiteout, int sync, int delete_orphan) 1358 { 1359 void *p; 1360 union ubifs_key key; 1361 struct ubifs_dent_node *dent, *dent2; 1362 int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0; 1363 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; 1364 int last_reference = !!(new_inode && new_inode->i_nlink == 0); 1365 int move = (old_dir != new_dir); 1366 struct ubifs_inode *new_ui, *whiteout_ui; 1367 u8 hash_old_dir[UBIFS_HASH_ARR_SZ]; 1368 u8 hash_new_dir[UBIFS_HASH_ARR_SZ]; 1369 u8 hash_new_inode[UBIFS_HASH_ARR_SZ]; 1370 u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ]; 1371 u8 hash_dent1[UBIFS_HASH_ARR_SZ]; 1372 u8 hash_dent2[UBIFS_HASH_ARR_SZ]; 1373 1374 ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0); 1375 ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0); 1376 ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex)); 1377 ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex)); 1378 1379 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1; 1380 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1; 1381 if (new_inode) { 1382 new_ui = ubifs_inode(new_inode); 1383 ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex)); 1384 ilen = UBIFS_INO_NODE_SZ; 1385 if (!last_reference) 1386 ilen += new_ui->data_len; 1387 } else 1388 ilen = 0; 1389 1390 if (whiteout) { 1391 whiteout_ui = ubifs_inode(whiteout); 1392 ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex)); 1393 ubifs_assert(c, whiteout->i_nlink == 1); 1394 ubifs_assert(c, !whiteout_ui->dirty); 1395 wlen = UBIFS_INO_NODE_SZ; 1396 wlen += whiteout_ui->data_len; 1397 } else 1398 wlen = 0; 1399 1400 aligned_dlen1 = ALIGN(dlen1, 8); 1401 aligned_dlen2 = ALIGN(dlen2, 8); 1402 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + 1403 ALIGN(wlen, 8) + ALIGN(plen, 8); 1404 if (move) 1405 len += plen; 1406 1407 len += ubifs_auth_node_sz(c); 1408 1409 dent = kzalloc(len, GFP_NOFS); 1410 if (!dent) 1411 return -ENOMEM; 1412 1413 /* Make reservation before allocating sequence numbers */ 1414 err = make_reservation(c, BASEHD, len); 1415 if (err) 1416 goto out_free; 1417 1418 /* Make new dent */ 1419 dent->ch.node_type = UBIFS_DENT_NODE; 1420 dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm); 1421 dent->inum = cpu_to_le64(old_inode->i_ino); 1422 dent->type = get_dent_type(old_inode->i_mode); 1423 dent->nlen = cpu_to_le16(fname_len(new_nm)); 1424 memcpy(dent->name, fname_name(new_nm), fname_len(new_nm)); 1425 dent->name[fname_len(new_nm)] = '\0'; 1426 set_dent_cookie(c, dent); 1427 zero_dent_node_unused(dent); 1428 ubifs_prep_grp_node(c, dent, dlen1, 0); 1429 err = ubifs_node_calc_hash(c, dent, hash_dent1); 1430 if (err) 1431 goto out_release; 1432 1433 dent2 = (void *)dent + aligned_dlen1; 1434 dent2->ch.node_type = UBIFS_DENT_NODE; 1435 dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm); 1436 1437 if (whiteout) { 1438 dent2->inum = cpu_to_le64(whiteout->i_ino); 1439 dent2->type = get_dent_type(whiteout->i_mode); 1440 } else { 1441 /* Make deletion dent */ 1442 dent2->inum = 0; 1443 dent2->type = DT_UNKNOWN; 1444 } 1445 dent2->nlen = cpu_to_le16(fname_len(old_nm)); 1446 memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm)); 1447 dent2->name[fname_len(old_nm)] = '\0'; 1448 set_dent_cookie(c, dent2); 1449 zero_dent_node_unused(dent2); 1450 ubifs_prep_grp_node(c, dent2, dlen2, 0); 1451 err = ubifs_node_calc_hash(c, dent2, hash_dent2); 1452 if (err) 1453 goto out_release; 1454 1455 p = (void *)dent2 + aligned_dlen2; 1456 if (new_inode) { 1457 pack_inode(c, p, new_inode, 0); 1458 err = ubifs_node_calc_hash(c, p, hash_new_inode); 1459 if (err) 1460 goto out_release; 1461 1462 p += ALIGN(ilen, 8); 1463 } 1464 1465 if (whiteout) { 1466 pack_inode(c, p, whiteout, 0); 1467 err = ubifs_node_calc_hash(c, p, hash_whiteout_inode); 1468 if (err) 1469 goto out_release; 1470 1471 p += ALIGN(wlen, 8); 1472 } 1473 1474 if (!move) { 1475 pack_inode(c, p, old_dir, 1); 1476 err = ubifs_node_calc_hash(c, p, hash_old_dir); 1477 if (err) 1478 goto out_release; 1479 } else { 1480 pack_inode(c, p, old_dir, 0); 1481 err = ubifs_node_calc_hash(c, p, hash_old_dir); 1482 if (err) 1483 goto out_release; 1484 1485 p += ALIGN(plen, 8); 1486 pack_inode(c, p, new_dir, 1); 1487 err = ubifs_node_calc_hash(c, p, hash_new_dir); 1488 if (err) 1489 goto out_release; 1490 } 1491 1492 if (last_reference) { 1493 err = ubifs_add_orphan(c, new_inode->i_ino); 1494 if (err) { 1495 release_head(c, BASEHD); 1496 goto out_finish; 1497 } 1498 new_ui->del_cmtno = c->cmt_no; 1499 orphan_added = 1; 1500 } 1501 1502 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); 1503 if (err) 1504 goto out_release; 1505 if (!sync) { 1506 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 1507 1508 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino); 1509 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino); 1510 if (new_inode) 1511 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1512 new_inode->i_ino); 1513 if (whiteout) 1514 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1515 whiteout->i_ino); 1516 } 1517 release_head(c, BASEHD); 1518 1519 ubifs_add_auth_dirt(c, lnum); 1520 1521 dent_key_init(c, &key, new_dir->i_ino, new_nm); 1522 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm); 1523 if (err) 1524 goto out_ro; 1525 1526 offs += aligned_dlen1; 1527 if (whiteout) { 1528 dent_key_init(c, &key, old_dir->i_ino, old_nm); 1529 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm); 1530 if (err) 1531 goto out_ro; 1532 } else { 1533 err = ubifs_add_dirt(c, lnum, dlen2); 1534 if (err) 1535 goto out_ro; 1536 1537 dent_key_init(c, &key, old_dir->i_ino, old_nm); 1538 err = ubifs_tnc_remove_nm(c, &key, old_nm); 1539 if (err) 1540 goto out_ro; 1541 } 1542 1543 offs += aligned_dlen2; 1544 if (new_inode) { 1545 ino_key_init(c, &key, new_inode->i_ino); 1546 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode); 1547 if (err) 1548 goto out_ro; 1549 offs += ALIGN(ilen, 8); 1550 } 1551 1552 if (whiteout) { 1553 ino_key_init(c, &key, whiteout->i_ino); 1554 err = ubifs_tnc_add(c, &key, lnum, offs, wlen, 1555 hash_whiteout_inode); 1556 if (err) 1557 goto out_ro; 1558 offs += ALIGN(wlen, 8); 1559 } 1560 1561 ino_key_init(c, &key, old_dir->i_ino); 1562 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir); 1563 if (err) 1564 goto out_ro; 1565 1566 if (move) { 1567 offs += ALIGN(plen, 8); 1568 ino_key_init(c, &key, new_dir->i_ino); 1569 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir); 1570 if (err) 1571 goto out_ro; 1572 } 1573 1574 if (delete_orphan) 1575 ubifs_delete_orphan(c, whiteout->i_ino); 1576 1577 finish_reservation(c); 1578 if (new_inode) { 1579 mark_inode_clean(c, new_ui); 1580 spin_lock(&new_ui->ui_lock); 1581 new_ui->synced_i_size = new_ui->ui_size; 1582 spin_unlock(&new_ui->ui_lock); 1583 } 1584 /* 1585 * No need to mark whiteout inode clean. 1586 * Whiteout doesn't have non-zero size, no need to update 1587 * synced_i_size for whiteout_ui. 1588 */ 1589 mark_inode_clean(c, ubifs_inode(old_dir)); 1590 if (move) 1591 mark_inode_clean(c, ubifs_inode(new_dir)); 1592 kfree(dent); 1593 return 0; 1594 1595 out_release: 1596 release_head(c, BASEHD); 1597 out_ro: 1598 ubifs_ro_mode(c, err); 1599 if (orphan_added) 1600 ubifs_delete_orphan(c, new_inode->i_ino); 1601 out_finish: 1602 finish_reservation(c); 1603 out_free: 1604 kfree(dent); 1605 return err; 1606 } 1607 1608 /** 1609 * truncate_data_node - re-compress/encrypt a truncated data node. 1610 * @c: UBIFS file-system description object 1611 * @inode: inode which refers to the data node 1612 * @block: data block number 1613 * @dn: data node to re-compress 1614 * @new_len: new length 1615 * @dn_size: size of the data node @dn in memory 1616 * 1617 * This function is used when an inode is truncated and the last data node of 1618 * the inode has to be re-compressed/encrypted and re-written. 1619 */ 1620 static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode, 1621 unsigned int block, struct ubifs_data_node *dn, 1622 int *new_len, int dn_size) 1623 { 1624 void *buf; 1625 int err, dlen, compr_type, out_len, data_size; 1626 1627 out_len = le32_to_cpu(dn->size); 1628 buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); 1629 if (!buf) 1630 return -ENOMEM; 1631 1632 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; 1633 data_size = dn_size - UBIFS_DATA_NODE_SZ; 1634 compr_type = le16_to_cpu(dn->compr_type); 1635 1636 if (IS_ENCRYPTED(inode)) { 1637 err = ubifs_decrypt(inode, dn, &dlen, block); 1638 if (err) 1639 goto out; 1640 } 1641 1642 if (compr_type == UBIFS_COMPR_NONE) { 1643 out_len = *new_len; 1644 } else { 1645 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type); 1646 if (err) 1647 goto out; 1648 1649 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type); 1650 } 1651 1652 if (IS_ENCRYPTED(inode)) { 1653 err = ubifs_encrypt(inode, dn, out_len, &data_size, block); 1654 if (err) 1655 goto out; 1656 1657 out_len = data_size; 1658 } else { 1659 dn->compr_size = 0; 1660 } 1661 1662 ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE); 1663 dn->compr_type = cpu_to_le16(compr_type); 1664 dn->size = cpu_to_le32(*new_len); 1665 *new_len = UBIFS_DATA_NODE_SZ + out_len; 1666 err = 0; 1667 out: 1668 kfree(buf); 1669 return err; 1670 } 1671 1672 /** 1673 * ubifs_jnl_truncate - update the journal for a truncation. 1674 * @c: UBIFS file-system description object 1675 * @inode: inode to truncate 1676 * @old_size: old size 1677 * @new_size: new size 1678 * 1679 * When the size of a file decreases due to truncation, a truncation node is 1680 * written, the journal tree is updated, and the last data block is re-written 1681 * if it has been affected. The inode is also updated in order to synchronize 1682 * the new inode size. 1683 * 1684 * This function marks the inode as clean and returns zero on success. In case 1685 * of failure, a negative error code is returned. 1686 */ 1687 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, 1688 loff_t old_size, loff_t new_size) 1689 { 1690 union ubifs_key key, to_key; 1691 struct ubifs_ino_node *ino; 1692 struct ubifs_trun_node *trun; 1693 struct ubifs_data_node *dn; 1694 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode); 1695 int dn_size; 1696 struct ubifs_inode *ui = ubifs_inode(inode); 1697 ino_t inum = inode->i_ino; 1698 unsigned int blk; 1699 u8 hash_ino[UBIFS_HASH_ARR_SZ]; 1700 u8 hash_dn[UBIFS_HASH_ARR_SZ]; 1701 1702 dbg_jnl("ino %lu, size %lld -> %lld", 1703 (unsigned long)inum, old_size, new_size); 1704 ubifs_assert(c, !ui->data_len); 1705 ubifs_assert(c, S_ISREG(inode->i_mode)); 1706 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 1707 1708 dn_size = COMPRESSED_DATA_NODE_BUF_SZ; 1709 1710 if (IS_ENCRYPTED(inode)) 1711 dn_size += UBIFS_CIPHER_BLOCK_SIZE; 1712 1713 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ + 1714 dn_size + ubifs_auth_node_sz(c); 1715 1716 ino = kmalloc(sz, GFP_NOFS); 1717 if (!ino) 1718 return -ENOMEM; 1719 1720 trun = (void *)ino + UBIFS_INO_NODE_SZ; 1721 trun->ch.node_type = UBIFS_TRUN_NODE; 1722 trun->inum = cpu_to_le32(inum); 1723 trun->old_size = cpu_to_le64(old_size); 1724 trun->new_size = cpu_to_le64(new_size); 1725 zero_trun_node_unused(trun); 1726 1727 dlen = new_size & (UBIFS_BLOCK_SIZE - 1); 1728 if (dlen) { 1729 /* Get last data block so it can be truncated */ 1730 dn = (void *)trun + UBIFS_TRUN_NODE_SZ; 1731 blk = new_size >> UBIFS_BLOCK_SHIFT; 1732 data_key_init(c, &key, inum, blk); 1733 dbg_jnlk(&key, "last block key "); 1734 err = ubifs_tnc_lookup(c, &key, dn); 1735 if (err == -ENOENT) 1736 dlen = 0; /* Not found (so it is a hole) */ 1737 else if (err) 1738 goto out_free; 1739 else { 1740 int dn_len = le32_to_cpu(dn->size); 1741 1742 if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) { 1743 ubifs_err(c, "bad data node (block %u, inode %lu)", 1744 blk, inode->i_ino); 1745 ubifs_dump_node(c, dn, dn_size); 1746 err = -EUCLEAN; 1747 goto out_free; 1748 } 1749 1750 if (dn_len <= dlen) 1751 dlen = 0; /* Nothing to do */ 1752 else { 1753 err = truncate_data_node(c, inode, blk, dn, 1754 &dlen, dn_size); 1755 if (err) 1756 goto out_free; 1757 } 1758 } 1759 } 1760 1761 /* Must make reservation before allocating sequence numbers */ 1762 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ; 1763 1764 if (ubifs_authenticated(c)) 1765 len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c); 1766 else 1767 len += dlen; 1768 1769 err = make_reservation(c, BASEHD, len); 1770 if (err) 1771 goto out_free; 1772 1773 pack_inode(c, ino, inode, 0); 1774 err = ubifs_node_calc_hash(c, ino, hash_ino); 1775 if (err) 1776 goto out_release; 1777 1778 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1); 1779 if (dlen) { 1780 ubifs_prep_grp_node(c, dn, dlen, 1); 1781 err = ubifs_node_calc_hash(c, dn, hash_dn); 1782 if (err) 1783 goto out_release; 1784 } 1785 1786 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync); 1787 if (err) 1788 goto out_release; 1789 if (!sync) 1790 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum); 1791 release_head(c, BASEHD); 1792 1793 ubifs_add_auth_dirt(c, lnum); 1794 1795 if (dlen) { 1796 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ; 1797 err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn); 1798 if (err) 1799 goto out_ro; 1800 } 1801 1802 ino_key_init(c, &key, inum); 1803 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino); 1804 if (err) 1805 goto out_ro; 1806 1807 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ); 1808 if (err) 1809 goto out_ro; 1810 1811 bit = new_size & (UBIFS_BLOCK_SIZE - 1); 1812 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0); 1813 data_key_init(c, &key, inum, blk); 1814 1815 bit = old_size & (UBIFS_BLOCK_SIZE - 1); 1816 blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1); 1817 data_key_init(c, &to_key, inum, blk); 1818 1819 err = ubifs_tnc_remove_range(c, &key, &to_key); 1820 if (err) 1821 goto out_ro; 1822 1823 finish_reservation(c); 1824 spin_lock(&ui->ui_lock); 1825 ui->synced_i_size = ui->ui_size; 1826 spin_unlock(&ui->ui_lock); 1827 mark_inode_clean(c, ui); 1828 kfree(ino); 1829 return 0; 1830 1831 out_release: 1832 release_head(c, BASEHD); 1833 out_ro: 1834 ubifs_ro_mode(c, err); 1835 finish_reservation(c); 1836 out_free: 1837 kfree(ino); 1838 return err; 1839 } 1840 1841 1842 /** 1843 * ubifs_jnl_delete_xattr - delete an extended attribute. 1844 * @c: UBIFS file-system description object 1845 * @host: host inode 1846 * @inode: extended attribute inode 1847 * @nm: extended attribute entry name 1848 * 1849 * This function delete an extended attribute which is very similar to 1850 * un-linking regular files - it writes a deletion xentry, a deletion inode and 1851 * updates the target inode. Returns zero in case of success and a negative 1852 * error code in case of failure. 1853 */ 1854 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, 1855 const struct inode *inode, 1856 const struct fscrypt_name *nm) 1857 { 1858 int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len; 1859 struct ubifs_dent_node *xent; 1860 struct ubifs_ino_node *ino; 1861 union ubifs_key xent_key, key1, key2; 1862 int sync = IS_DIRSYNC(host); 1863 struct ubifs_inode *host_ui = ubifs_inode(host); 1864 u8 hash[UBIFS_HASH_ARR_SZ]; 1865 1866 ubifs_assert(c, inode->i_nlink == 0); 1867 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 1868 1869 /* 1870 * Since we are deleting the inode, we do not bother to attach any data 1871 * to it and assume its length is %UBIFS_INO_NODE_SZ. 1872 */ 1873 xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1; 1874 aligned_xlen = ALIGN(xlen, 8); 1875 hlen = host_ui->data_len + UBIFS_INO_NODE_SZ; 1876 len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8); 1877 1878 write_len = len + ubifs_auth_node_sz(c); 1879 1880 xent = kzalloc(write_len, GFP_NOFS); 1881 if (!xent) 1882 return -ENOMEM; 1883 1884 /* Make reservation before allocating sequence numbers */ 1885 err = make_reservation(c, BASEHD, write_len); 1886 if (err) { 1887 kfree(xent); 1888 return err; 1889 } 1890 1891 xent->ch.node_type = UBIFS_XENT_NODE; 1892 xent_key_init(c, &xent_key, host->i_ino, nm); 1893 key_write(c, &xent_key, xent->key); 1894 xent->inum = 0; 1895 xent->type = get_dent_type(inode->i_mode); 1896 xent->nlen = cpu_to_le16(fname_len(nm)); 1897 memcpy(xent->name, fname_name(nm), fname_len(nm)); 1898 xent->name[fname_len(nm)] = '\0'; 1899 zero_dent_node_unused(xent); 1900 ubifs_prep_grp_node(c, xent, xlen, 0); 1901 1902 ino = (void *)xent + aligned_xlen; 1903 pack_inode(c, ino, inode, 0); 1904 ino = (void *)ino + UBIFS_INO_NODE_SZ; 1905 pack_inode(c, ino, host, 1); 1906 err = ubifs_node_calc_hash(c, ino, hash); 1907 if (err) 1908 goto out_release; 1909 1910 err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync); 1911 if (!sync && !err) 1912 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino); 1913 release_head(c, BASEHD); 1914 1915 ubifs_add_auth_dirt(c, lnum); 1916 kfree(xent); 1917 if (err) 1918 goto out_ro; 1919 1920 /* Remove the extended attribute entry from TNC */ 1921 err = ubifs_tnc_remove_nm(c, &xent_key, nm); 1922 if (err) 1923 goto out_ro; 1924 err = ubifs_add_dirt(c, lnum, xlen); 1925 if (err) 1926 goto out_ro; 1927 1928 /* 1929 * Remove all nodes belonging to the extended attribute inode from TNC. 1930 * Well, there actually must be only one node - the inode itself. 1931 */ 1932 lowest_ino_key(c, &key1, inode->i_ino); 1933 highest_ino_key(c, &key2, inode->i_ino); 1934 err = ubifs_tnc_remove_range(c, &key1, &key2); 1935 if (err) 1936 goto out_ro; 1937 err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ); 1938 if (err) 1939 goto out_ro; 1940 1941 /* And update TNC with the new host inode position */ 1942 ino_key_init(c, &key1, host->i_ino); 1943 err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash); 1944 if (err) 1945 goto out_ro; 1946 1947 finish_reservation(c); 1948 spin_lock(&host_ui->ui_lock); 1949 host_ui->synced_i_size = host_ui->ui_size; 1950 spin_unlock(&host_ui->ui_lock); 1951 mark_inode_clean(c, host_ui); 1952 return 0; 1953 1954 out_release: 1955 kfree(xent); 1956 release_head(c, BASEHD); 1957 out_ro: 1958 ubifs_ro_mode(c, err); 1959 finish_reservation(c); 1960 return err; 1961 } 1962 1963 /** 1964 * ubifs_jnl_change_xattr - change an extended attribute. 1965 * @c: UBIFS file-system description object 1966 * @inode: extended attribute inode 1967 * @host: host inode 1968 * 1969 * This function writes the updated version of an extended attribute inode and 1970 * the host inode to the journal (to the base head). The host inode is written 1971 * after the extended attribute inode in order to guarantee that the extended 1972 * attribute will be flushed when the inode is synchronized by 'fsync()' and 1973 * consequently, the write-buffer is synchronized. This function returns zero 1974 * in case of success and a negative error code in case of failure. 1975 */ 1976 int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, 1977 const struct inode *host) 1978 { 1979 int err, len1, len2, aligned_len, aligned_len1, lnum, offs; 1980 struct ubifs_inode *host_ui = ubifs_inode(host); 1981 struct ubifs_ino_node *ino; 1982 union ubifs_key key; 1983 int sync = IS_DIRSYNC(host); 1984 u8 hash_host[UBIFS_HASH_ARR_SZ]; 1985 u8 hash[UBIFS_HASH_ARR_SZ]; 1986 1987 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino); 1988 ubifs_assert(c, inode->i_nlink > 0); 1989 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 1990 1991 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len; 1992 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len; 1993 aligned_len1 = ALIGN(len1, 8); 1994 aligned_len = aligned_len1 + ALIGN(len2, 8); 1995 1996 aligned_len += ubifs_auth_node_sz(c); 1997 1998 ino = kzalloc(aligned_len, GFP_NOFS); 1999 if (!ino) 2000 return -ENOMEM; 2001 2002 /* Make reservation before allocating sequence numbers */ 2003 err = make_reservation(c, BASEHD, aligned_len); 2004 if (err) 2005 goto out_free; 2006 2007 pack_inode(c, ino, host, 0); 2008 err = ubifs_node_calc_hash(c, ino, hash_host); 2009 if (err) 2010 goto out_release; 2011 pack_inode(c, (void *)ino + aligned_len1, inode, 1); 2012 err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash); 2013 if (err) 2014 goto out_release; 2015 2016 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0); 2017 if (!sync && !err) { 2018 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 2019 2020 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino); 2021 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino); 2022 } 2023 release_head(c, BASEHD); 2024 if (err) 2025 goto out_ro; 2026 2027 ubifs_add_auth_dirt(c, lnum); 2028 2029 ino_key_init(c, &key, host->i_ino); 2030 err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host); 2031 if (err) 2032 goto out_ro; 2033 2034 ino_key_init(c, &key, inode->i_ino); 2035 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash); 2036 if (err) 2037 goto out_ro; 2038 2039 finish_reservation(c); 2040 spin_lock(&host_ui->ui_lock); 2041 host_ui->synced_i_size = host_ui->ui_size; 2042 spin_unlock(&host_ui->ui_lock); 2043 mark_inode_clean(c, host_ui); 2044 kfree(ino); 2045 return 0; 2046 2047 out_release: 2048 release_head(c, BASEHD); 2049 out_ro: 2050 ubifs_ro_mode(c, err); 2051 finish_reservation(c); 2052 out_free: 2053 kfree(ino); 2054 return err; 2055 } 2056 2057