1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of UBIFS.
4 *
5 * Copyright (C) 2006-2008 Nokia Corporation.
6 *
7 * Authors: Artem Bityutskiy (Битюцкий Артём)
8 * Adrian Hunter
9 */
10
11 /*
12 * This file implements the UBIFS journal.
13 *
14 * The journal consists of 2 parts - the log and bud LEBs. The log has fixed
15 * length and position, while a bud logical eraseblock is any LEB in the main
16 * area. Buds contain file system data - data nodes, inode nodes, etc. The log
17 * contains only references to buds and some other things, such as the commit
18 * start node. The idea is that when we commit the journal, we do
19 * not copy the data, the buds just become indexed. Since after the commit the
20 * nodes in bud eraseblocks become leaf nodes of the file system index tree, we
21 * use the term "bud". The analogy is obvious: bud eraseblocks contain nodes
22 * which will become leaves in the future.
23 *
24 * The journal is multi-headed because we want to write data to the journal as
25 * optimally as possible. It is nice to have nodes belonging to the same inode
26 * in one LEB, so we may write data owned by different inodes to different
27 * journal heads, although at present only one data head is used.
28 *
29 * For recovery reasons, the base head contains all inode nodes, all directory
30 * entry nodes and all truncate nodes. This means that the other heads contain
31 * only data nodes.
32 *
33 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
34 * time of commit, the bud is retained to continue to be used in the journal,
35 * even though the "front" of the LEB is now indexed. In that case, the log
36 * reference contains the offset where the bud starts for the purposes of the
37 * journal.
38 *
39 * The journal size has to be limited, because the larger the journal, the
40 * longer it takes to mount UBIFS (scanning the journal) and the more memory it
41 * takes (indexing in the TNC).
42 *
43 * All the journal write operations like 'ubifs_jnl_update()' here, which write
44 * multiple UBIFS nodes to the journal in one go, are atomic with respect to
45 * unclean reboots. Should an unclean reboot happen, the recovery code drops
46 * all the nodes.
47 */
48
49 #include "ubifs.h"
50
51 /**
52 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
53 * @ino: the inode to zero out
54 */
55 static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
56 {
57 memset(ino->padding1, 0, 4);
58 memset(ino->padding2, 0, 26);
59 }
60
61 /**
62 * zero_dent_node_unused - zero out unused fields of an on-flash directory
63 * entry node.
64 * @dent: the directory entry to zero out
65 */
66 static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
67 {
68 dent->padding1 = 0;
69 }
70
71 /**
72 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
73 * node.
74 * @trun: the truncation node to zero out
75 */
76 static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
77 {
78 memset(trun->padding, 0, 12);
79 }
80
81 static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
82 {
83 if (ubifs_authenticated(c))
84 ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
85 }
86
87 /**
88 * reserve_space - reserve space in the journal.
89 * @c: UBIFS file-system description object
90 * @jhead: journal head number
91 * @len: node length
92 *
93 * This function reserves space in journal head @jhead.
If the reservation
94 * succeeded, the journal head stays locked and later has to be unlocked using
95 * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to
96 * be done, and other negative error codes in case of other failures.
97 */
98 static int reserve_space(struct ubifs_info *c, int jhead, int len)
99 {
100 int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
101 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
102
103 /*
104 * Typically, the base head has smaller nodes written to it, so it is
105 * better to try to allocate space at the ends of eraseblocks. This is
106 * what the squeeze parameter does.
107 */
108 ubifs_assert(c, !c->ro_media && !c->ro_mount);
109 squeeze = (jhead == BASEHD);
110 again:
111 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
112
113 if (c->ro_error) {
114 err = -EROFS;
115 goto out_unlock;
116 }
117
118 avail = c->leb_size - wbuf->offs - wbuf->used;
119 if (wbuf->lnum != -1 && avail >= len)
120 return 0;
121
122 /*
123 * Write buffer wasn't seeked or there is not enough space - look for an
124 * LEB with some empty space.
125 */
126 lnum = ubifs_find_free_space(c, len, &offs, squeeze);
127 if (lnum >= 0)
128 goto out;
129
130 err = lnum;
131 if (err != -ENOSPC)
132 goto out_unlock;
133
134 /*
135 * No free space, we have to run the garbage collector to make
136 * some. But the write-buffer mutex has to be unlocked because
137 * GC also takes it.
138 */
139 dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
140 mutex_unlock(&wbuf->io_mutex);
141
142 lnum = ubifs_garbage_collect(c, 0);
143 if (lnum < 0) {
144 err = lnum;
145 if (err != -ENOSPC)
146 return err;
147
148 /*
149 * GC could not make a free LEB. But someone else may
150 * have allocated a new bud for this journal head,
151 * because we dropped @wbuf->io_mutex, so try once
152 * again.
153 */
154 dbg_jnl("GC couldn't make a free LEB for jhead %s",
155 dbg_jhead(jhead));
156 if (retries++ < 2) {
157 dbg_jnl("retry (%d)", retries);
158 goto again;
159 }
160
161 dbg_jnl("return -ENOSPC");
162 return err;
163 }
164
165 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
166 dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
167 avail = c->leb_size - wbuf->offs - wbuf->used;
168
169 if (wbuf->lnum != -1 && avail >= len) {
170 /*
171 * Someone else has switched the journal head and we have
172 * enough space now. This happens when more than one process is
173 * trying to write to the same journal head at the same time.
174 */
175 dbg_jnl("return LEB %d back, already have LEB %d:%d",
176 lnum, wbuf->lnum, wbuf->offs + wbuf->used);
177 err = ubifs_return_leb(c, lnum);
178 if (err)
179 goto out_unlock;
180 return 0;
181 }
182
183 offs = 0;
184
185 out:
186 /*
187 * Make sure we synchronize the write-buffer before we add the new bud
188 * to the log. Otherwise we may have a power cut after the log
189 * reference node for the last bud (@lnum) is written but before the
190 * write-buffer data are written to the next-to-last bud
191 * (@wbuf->lnum). And the effect would be that the recovery would see
192 * that there is corruption in the next-to-last bud.
193 */
194 err = ubifs_wbuf_sync_nolock(wbuf);
195 if (err)
196 goto out_return;
197 err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
198 if (err)
199 goto out_return;
200 err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
201 if (err)
202 goto out_unlock;
203
204 return 0;
205
206 out_unlock:
207 mutex_unlock(&wbuf->io_mutex);
208 return err;
209
210 out_return:
211 /* An error occurred and the LEB has to be returned to lprops */
212 ubifs_assert(c, err < 0);
213 err1 = ubifs_return_leb(c, lnum);
214 if (err1 && err == -EAGAIN)
215 /*
216 * Return original error code only if it is not %-EAGAIN,
217 * which is not really an error. Otherwise, return the error
218 * code of 'ubifs_return_leb()'.
219 */
220 err = err1;
221 mutex_unlock(&wbuf->io_mutex);
222 return err;
223 }
224
225 static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
226 int len, struct shash_desc *hash)
227 {
228 int auth_node_size = ubifs_auth_node_sz(c);
229 int err;
230
231 while (1) {
232 const struct ubifs_ch *ch = node;
233 int nodelen = le32_to_cpu(ch->len);
234
235 ubifs_assert(c, len >= auth_node_size);
236
237 if (len == auth_node_size)
238 break;
239
240 ubifs_assert(c, len > nodelen);
241 ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));
242
243 err = ubifs_shash_update(c, hash, (void *)node, nodelen);
244 if (err)
245 return err;
246
247 node += ALIGN(nodelen, 8);
248 len -= ALIGN(nodelen, 8);
249 }
250
251 return ubifs_prepare_auth_node(c, node, hash);
252 }
253
254 /**
255 * write_head - write data to a journal head.
256 * @c: UBIFS file-system description object
257 * @jhead: journal head
258 * @buf: buffer to write
259 * @len: length to write
260 * @lnum: LEB number written is returned here
261 * @offs: offset written is returned here
262 * @sync: non-zero if the write-buffer has to be synchronized
263 *
264 * This function writes data to the reserved space of journal head @jhead.
265 * Returns zero in case of success and a negative error code in case of
266 * failure.
267 */
268 static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
269 int *lnum, int *offs, int sync)
270 {
271 int err;
272 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
273
274 ubifs_assert(c, jhead != GCHD);
275
276 *lnum = c->jheads[jhead].wbuf.lnum;
277 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
278 dbg_jnl("jhead %s, LEB %d:%d, len %d",
279 dbg_jhead(jhead), *lnum, *offs, len);
280
281 if (ubifs_authenticated(c)) {
282 err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
283 if (err)
284 return err;
285 }
286
287 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
288 if (err)
289 return err;
290 if (sync)
291 err = ubifs_wbuf_sync_nolock(wbuf);
292 return err;
293 }
294
295 /**
296 * __queue_and_wait - queue a task and wait until it is woken up.
297 * @c: UBIFS file-system description object
298 *
299 * This function adds the current task to the wait queue and waits until it is
300 * woken up. It should be called with @c->reserve_space_wq locked.
301 */
302 static void __queue_and_wait(struct ubifs_info *c)
303 {
304 DEFINE_WAIT(wait);
305
306 __add_wait_queue_entry_tail_exclusive(&c->reserve_space_wq, &wait);
307 set_current_state(TASK_UNINTERRUPTIBLE);
308 spin_unlock(&c->reserve_space_wq.lock);
309
310 schedule();
311 finish_wait(&c->reserve_space_wq, &wait);
312 }
313
314 /**
315 * wait_for_reservation - try queuing the current task to wait until woken up.
316 * @c: UBIFS file-system description object
317 *
318 * This function queues the current task to wait until it is woken up, if
319 * queuing has been started (@c->need_wait_space is not %0). Returns %true if
320 * the current task was added to the queue, otherwise %false is returned.
321 */
322 static bool wait_for_reservation(struct ubifs_info *c)
323 {
324 if (likely(atomic_read(&c->need_wait_space) == 0))
325 /* Quick path to check whether queuing has been started. */
326 return false;
327
328 spin_lock(&c->reserve_space_wq.lock);
329 if (atomic_read(&c->need_wait_space) == 0) {
330 /* Queuing has not been started, don't queue the current task. */
331 spin_unlock(&c->reserve_space_wq.lock);
332 return false;
333 }
334
335 __queue_and_wait(c);
336 return true;
337 }
338
339 /**
340 * wake_up_reservation - wake up the first task in the queue or stop queuing.
341 * @c: UBIFS file-system description object
342 *
343 * This function wakes up the first task in the queue if it exists, or stops
344 * queuing if there are no tasks in the queue.
345 */
346 static void wake_up_reservation(struct ubifs_info *c)
347 {
348 spin_lock(&c->reserve_space_wq.lock);
349 if (waitqueue_active(&c->reserve_space_wq))
350 wake_up_locked(&c->reserve_space_wq);
351 else
352 /*
353 * Compared with wait_for_reservation(), set @c->need_wait_space
354 * under the protection of the wait queue lock, which avoids
355 * @c->need_wait_space being set to 0 after a new task is queued.
356 */
357 atomic_set(&c->need_wait_space, 0);
358 spin_unlock(&c->reserve_space_wq.lock);
359 }
360
361 /**
362 * add_or_start_queue - add the current task to the queue or start queuing.
363 * @c: UBIFS file-system description object
364 *
365 * This function starts queuing if queuing has not been started, otherwise it
366 * adds the current task to the queue.
367 */
368 static void add_or_start_queue(struct ubifs_info *c)
369 {
370 spin_lock(&c->reserve_space_wq.lock);
371 if (atomic_cmpxchg(&c->need_wait_space, 0, 1) == 0) {
372 /* Starts queuing, the task can go on directly. */
373 spin_unlock(&c->reserve_space_wq.lock);
374 return;
375 }
376
377 /*
378 * At least two tasks have retried more than 32 times at this
379 * point; the first task has started queuing, so just queue the
380 * remaining tasks.
381 */
382 __queue_and_wait(c);
383 }
384
385 /**
386 * make_reservation - reserve journal space.
387 * @c: UBIFS file-system description object
388 * @jhead: journal head
389 * @len: how many bytes to reserve
390 *
391 * This function makes space reservation in journal head @jhead. The function
392 * takes the commit lock and locks the journal head, and the caller has to
393 * unlock the head and finish the reservation with 'finish_reservation()'.
394 * Returns zero in case of success and a negative error code in case of
395 * failure.
396 *
397 * Note, the journal head may be unlocked as soon as the data is written, while
398 * the commit lock has to be released after the data has been added to the
399 * TNC.
400 */
401 static int make_reservation(struct ubifs_info *c, int jhead, int len)
402 {
403 int err, cmt_retries = 0, nospc_retries = 0;
404 bool blocked = wait_for_reservation(c);
405
406 again:
407 down_read(&c->commit_sem);
408 err = reserve_space(c, jhead, len);
409 if (!err) {
410 /* c->commit_sem will get released via finish_reservation(). */
411 goto out_wake_up;
412 }
413 up_read(&c->commit_sem);
414
415 if (err == -ENOSPC) {
416 /*
417 * GC could not make any progress.
We should try to commit,
418 * because that could produce more dirty space and then GC would make
419 * progress, so set the error to -EAGAIN so that the code below
420 * will commit and re-try.
421 */
422 nospc_retries++;
423 dbg_jnl("no space, retry");
424 err = -EAGAIN;
425 }
426
427 if (err != -EAGAIN)
428 goto out;
429
430 /*
431 * -EAGAIN means that the journal is full or too large, or the above
432 * code wants to do one commit. Do this and re-try.
433 */
434 if (cmt_retries > 128) {
435 /*
436 * This should not happen unless:
437 * 1. The journal size limitations are too tough.
438 * 2. The budgeting is incorrect. We always have to be able to
439 * write to the media, because all operations are budgeted.
440 * Deletions are not budgeted, though, but we reserve an
441 * extra LEB for them.
442 */
443 ubifs_err(c, "stuck in space allocation, nospc_retries %d",
444 nospc_retries);
445 err = -ENOSPC;
446 goto out;
447 } else if (cmt_retries > 32) {
448 /*
449 * This is almost impossible to happen, unless there are many
450 * tasks making reservations concurrently and some task has
451 * retried gc + commit many times, while the available space
452 * generated during this period was grabbed by other tasks.
453 * But if it happens, start queuing up all tasks that will make
454 * space reservations, then there is only one task making a space
455 * reservation at any time, and it can always succeed under
456 * the premise of correct budgeting.
457 */
458 ubifs_warn(c, "too many space allocation cmt_retries (%d) "
459 "nospc_retries (%d), start queuing tasks",
460 cmt_retries, nospc_retries);
461
462 if (!blocked) {
463 blocked = true;
464 add_or_start_queue(c);
465 }
466 }
467
468 dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
469 cmt_retries);
470 cmt_retries += 1;
471
472 err = ubifs_run_commit(c);
473 if (err)
474 goto out_wake_up;
475 goto again;
476
477 out:
478 ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
479 len, jhead, err);
480 if (err == -ENOSPC) {
481 /* These are some budgeting problems, print useful information */
482 down_write(&c->commit_sem);
483 dump_stack();
484 ubifs_dump_budg(c, &c->bi);
485 ubifs_dump_lprops(c);
486 cmt_retries = dbg_check_lprops(c);
487 up_write(&c->commit_sem);
488 }
489 out_wake_up:
490 if (blocked) {
491 /*
492 * Only tasks that have ever started queuing or have ever been
493 * queued can wake up other queued tasks, which makes sure that
494 * only one task is woken up to make a space reservation.
495 * For example:
496 * task A task B task C
497 * make_reservation make_reservation
498 * reserve_space // 0
499 * wake_up_reservation
500 * atomic_cmpxchg // 0, start queuing
501 * reserve_space
502 * wait_for_reservation
503 * __queue_and_wait
504 * add_wait_queue
505 * if (blocked) // false
506 * // So that task C won't be woken up to race with task B
507 */
508 wake_up_reservation(c);
509 }
510 return err;
511 }
512
513 /**
514 * release_head - release a journal head.
515 * @c: UBIFS file-system description object
516 * @jhead: journal head
517 *
518 * This function releases journal head @jhead which was locked by
519 * the 'make_reservation()' function. It has to be called after each successful
520 * 'make_reservation()' invocation.
521 */
522 static inline void release_head(struct ubifs_info *c, int jhead)
523 {
524 mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
525 }
526
527 /**
528 * finish_reservation - finish a reservation.
529 * @c: UBIFS file-system description object
530 *
531 * This function finishes journal space reservation.
It must be called after 532 * 'make_reservation()'. 533 */ 534 static void finish_reservation(struct ubifs_info *c) 535 { 536 up_read(&c->commit_sem); 537 } 538 539 /** 540 * get_dent_type - translate VFS inode mode to UBIFS directory entry type. 541 * @mode: inode mode 542 */ 543 static int get_dent_type(int mode) 544 { 545 switch (mode & S_IFMT) { 546 case S_IFREG: 547 return UBIFS_ITYPE_REG; 548 case S_IFDIR: 549 return UBIFS_ITYPE_DIR; 550 case S_IFLNK: 551 return UBIFS_ITYPE_LNK; 552 case S_IFBLK: 553 return UBIFS_ITYPE_BLK; 554 case S_IFCHR: 555 return UBIFS_ITYPE_CHR; 556 case S_IFIFO: 557 return UBIFS_ITYPE_FIFO; 558 case S_IFSOCK: 559 return UBIFS_ITYPE_SOCK; 560 default: 561 BUG(); 562 } 563 return 0; 564 } 565 566 /** 567 * pack_inode - pack an inode node. 568 * @c: UBIFS file-system description object 569 * @ino: buffer in which to pack inode node 570 * @inode: inode to pack 571 * @last: indicates the last node of the group 572 */ 573 static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino, 574 const struct inode *inode, int last) 575 { 576 int data_len = 0, last_reference = !inode->i_nlink; 577 struct ubifs_inode *ui = ubifs_inode(inode); 578 579 ino->ch.node_type = UBIFS_INO_NODE; 580 ino_key_init_flash(c, &ino->key, inode->i_ino); 581 ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum); 582 ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode)); 583 ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode)); 584 ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode)); 585 ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); 586 ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode)); 587 ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode)); 588 ino->uid = cpu_to_le32(i_uid_read(inode)); 589 ino->gid = cpu_to_le32(i_gid_read(inode)); 590 ino->mode = cpu_to_le32(inode->i_mode); 591 ino->flags = cpu_to_le32(ui->flags); 592 ino->size = cpu_to_le64(ui->ui_size); 593 ino->nlink = cpu_to_le32(inode->i_nlink); 594 ino->compr_type = cpu_to_le16(ui->compr_type); 595 ino->data_len = cpu_to_le32(ui->data_len); 596 ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt); 597 ino->xattr_size = cpu_to_le32(ui->xattr_size); 598 ino->xattr_names = cpu_to_le32(ui->xattr_names); 599 zero_ino_node_unused(ino); 600 601 /* 602 * Drop the attached data if this is a deletion inode, the data is not 603 * needed anymore. 604 */ 605 if (!last_reference) { 606 memcpy(ino->data, ui->data, ui->data_len); 607 data_len = ui->data_len; 608 } 609 610 ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last); 611 } 612 613 /** 614 * mark_inode_clean - mark UBIFS inode as clean. 615 * @c: UBIFS file-system description object 616 * @ui: UBIFS inode to mark as clean 617 * 618 * This helper function marks UBIFS inode @ui as clean by cleaning the 619 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the 620 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would 621 * just do nothing. 622 */ 623 static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui) 624 { 625 if (ui->dirty) 626 ubifs_release_dirty_inode_budget(c, ui); 627 ui->dirty = 0; 628 } 629 630 static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent) 631 { 632 if (c->double_hash) 633 dent->cookie = (__force __le32) get_random_u32(); 634 else 635 dent->cookie = 0; 636 } 637 638 /** 639 * ubifs_jnl_update - update inode. 
640 * @c: UBIFS file-system description object 641 * @dir: parent inode or host inode in case of extended attributes 642 * @nm: directory entry name 643 * @inode: inode to update 644 * @deletion: indicates a directory entry deletion i.e unlink or rmdir 645 * @xent: non-zero if the directory entry is an extended attribute entry 646 * @in_orphan: indicates whether the @inode is in orphan list 647 * 648 * This function updates an inode by writing a directory entry (or extended 649 * attribute entry), the inode itself, and the parent directory inode (or the 650 * host inode) to the journal. 651 * 652 * The function writes the host inode @dir last, which is important in case of 653 * extended attributes. Indeed, then we guarantee that if the host inode gets 654 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed, 655 * the extended attribute inode gets flushed too. And this is exactly what the 656 * user expects - synchronizing the host inode synchronizes its extended 657 * attributes. Similarly, this guarantees that if @dir is synchronized, its 658 * directory entry corresponding to @nm gets synchronized too. 659 * 660 * If the inode (@inode) or the parent directory (@dir) are synchronous, this 661 * function synchronizes the write-buffer. 662 * 663 * This function marks the @dir and @inode inodes as clean and returns zero on 664 * success. In case of failure, a negative error code is returned. 665 */ 666 int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, 667 const struct fscrypt_name *nm, const struct inode *inode, 668 int deletion, int xent, int in_orphan) 669 { 670 int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0; 671 int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); 672 int last_reference = !!(deletion && inode->i_nlink == 0); 673 struct ubifs_inode *ui = ubifs_inode(inode); 674 struct ubifs_inode *host_ui = ubifs_inode(dir); 675 struct ubifs_dent_node *dent; 676 struct ubifs_ino_node *ino; 677 union ubifs_key dent_key, ino_key; 678 u8 hash_dent[UBIFS_HASH_ARR_SZ]; 679 u8 hash_ino[UBIFS_HASH_ARR_SZ]; 680 u8 hash_ino_host[UBIFS_HASH_ARR_SZ]; 681 682 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 683 684 dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1; 685 ilen = UBIFS_INO_NODE_SZ; 686 687 /* 688 * If the last reference to the inode is being deleted, then there is 689 * no need to attach and write inode data, it is being deleted anyway. 690 * And if the inode is being deleted, no need to synchronize 691 * write-buffer even if the inode is synchronous. 
692 */ 693 if (!last_reference) { 694 ilen += ui->data_len; 695 sync |= IS_SYNC(inode); 696 } 697 698 aligned_dlen = ALIGN(dlen, 8); 699 aligned_ilen = ALIGN(ilen, 8); 700 701 len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; 702 /* Make sure to also account for extended attributes */ 703 if (ubifs_authenticated(c)) 704 len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c); 705 else 706 len += host_ui->data_len; 707 708 dent = kzalloc(len, GFP_NOFS); 709 if (!dent) 710 return -ENOMEM; 711 712 /* Make reservation before allocating sequence numbers */ 713 err = make_reservation(c, BASEHD, len); 714 if (err) 715 goto out_free; 716 717 if (!xent) { 718 dent->ch.node_type = UBIFS_DENT_NODE; 719 if (fname_name(nm) == NULL) 720 dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash); 721 else 722 dent_key_init(c, &dent_key, dir->i_ino, nm); 723 } else { 724 dent->ch.node_type = UBIFS_XENT_NODE; 725 xent_key_init(c, &dent_key, dir->i_ino, nm); 726 } 727 728 key_write(c, &dent_key, dent->key); 729 dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino); 730 dent->type = get_dent_type(inode->i_mode); 731 dent->nlen = cpu_to_le16(fname_len(nm)); 732 memcpy(dent->name, fname_name(nm), fname_len(nm)); 733 dent->name[fname_len(nm)] = '\0'; 734 set_dent_cookie(c, dent); 735 736 zero_dent_node_unused(dent); 737 ubifs_prep_grp_node(c, dent, dlen, 0); 738 err = ubifs_node_calc_hash(c, dent, hash_dent); 739 if (err) 740 goto out_release; 741 742 ino = (void *)dent + aligned_dlen; 743 pack_inode(c, ino, inode, 0); 744 err = ubifs_node_calc_hash(c, ino, hash_ino); 745 if (err) 746 goto out_release; 747 748 ino = (void *)ino + aligned_ilen; 749 pack_inode(c, ino, dir, 1); 750 err = ubifs_node_calc_hash(c, ino, hash_ino_host); 751 if (err) 752 goto out_release; 753 754 if (last_reference && !in_orphan) { 755 err = ubifs_add_orphan(c, inode->i_ino); 756 if (err) { 757 release_head(c, BASEHD); 758 goto out_finish; 759 } 760 ui->del_cmtno = c->cmt_no; 761 orphan_added = 1; 762 } 763 764 err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); 765 if (err) 766 goto out_release; 767 if (!sync) { 768 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 769 770 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino); 771 ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino); 772 } 773 release_head(c, BASEHD); 774 kfree(dent); 775 ubifs_add_auth_dirt(c, lnum); 776 777 if (deletion) { 778 if (fname_name(nm) == NULL) 779 err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash); 780 else 781 err = ubifs_tnc_remove_nm(c, &dent_key, nm); 782 if (err) 783 goto out_ro; 784 err = ubifs_add_dirt(c, lnum, dlen); 785 } else 786 err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, 787 hash_dent, nm); 788 if (err) 789 goto out_ro; 790 791 /* 792 * Note, we do not remove the inode from TNC even if the last reference 793 * to it has just been deleted, because the inode may still be opened. 794 * Instead, the inode has been added to orphan lists and the orphan 795 * subsystem will take further care about it. 
796 */ 797 ino_key_init(c, &ino_key, inode->i_ino); 798 ino_offs = dent_offs + aligned_dlen; 799 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino); 800 if (err) 801 goto out_ro; 802 803 ino_key_init(c, &ino_key, dir->i_ino); 804 ino_offs += aligned_ilen; 805 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, 806 UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host); 807 if (err) 808 goto out_ro; 809 810 if (in_orphan && inode->i_nlink) 811 ubifs_delete_orphan(c, inode->i_ino); 812 813 finish_reservation(c); 814 spin_lock(&ui->ui_lock); 815 ui->synced_i_size = ui->ui_size; 816 spin_unlock(&ui->ui_lock); 817 if (xent) { 818 spin_lock(&host_ui->ui_lock); 819 host_ui->synced_i_size = host_ui->ui_size; 820 spin_unlock(&host_ui->ui_lock); 821 } 822 mark_inode_clean(c, ui); 823 mark_inode_clean(c, host_ui); 824 return 0; 825 826 out_finish: 827 finish_reservation(c); 828 out_free: 829 kfree(dent); 830 return err; 831 832 out_release: 833 release_head(c, BASEHD); 834 kfree(dent); 835 out_ro: 836 ubifs_ro_mode(c, err); 837 if (orphan_added) 838 ubifs_delete_orphan(c, inode->i_ino); 839 finish_reservation(c); 840 return err; 841 } 842 843 /** 844 * ubifs_jnl_write_data - write a data node to the journal. 845 * @c: UBIFS file-system description object 846 * @inode: inode the data node belongs to 847 * @key: node key 848 * @folio: buffer to write 849 * @offset: offset to write at 850 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE) 851 * 852 * This function writes a data node to the journal. Returns %0 if the data node 853 * was successfully written, and a negative error code in case of failure. 854 */ 855 int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, 856 const union ubifs_key *key, struct folio *folio, 857 size_t offset, int len) 858 { 859 struct ubifs_data_node *data; 860 int err, lnum, offs, compr_type, out_len, compr_len, auth_len; 861 int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1; 862 int write_len; 863 struct ubifs_inode *ui = ubifs_inode(inode); 864 bool encrypted = IS_ENCRYPTED(inode); 865 u8 hash[UBIFS_HASH_ARR_SZ]; 866 867 dbg_jnlk(key, "ino %lu, blk %u, len %d, key ", 868 (unsigned long)key_inum(c, key), key_block(c, key), len); 869 ubifs_assert(c, len <= UBIFS_BLOCK_SIZE); 870 871 if (encrypted) 872 dlen += UBIFS_CIPHER_BLOCK_SIZE; 873 874 auth_len = ubifs_auth_node_sz(c); 875 876 data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN); 877 if (!data) { 878 /* 879 * Fall-back to the write reserve buffer. Note, we might be 880 * currently on the memory reclaim path, when the kernel is 881 * trying to free some memory by writing out dirty pages. The 882 * write reserve buffer helps us to guarantee that we are 883 * always able to write the data. 
884 */
885 allocated = 0;
886 mutex_lock(&c->write_reserve_mutex);
887 data = c->write_reserve_buf;
888 }
889
890 data->ch.node_type = UBIFS_DATA_NODE;
891 key_write(c, key, &data->key);
892 data->size = cpu_to_le32(len);
893
894 if (!(ui->flags & UBIFS_COMPR_FL))
895 /* Compression is disabled for this inode */
896 compr_type = UBIFS_COMPR_NONE;
897 else
898 compr_type = ui->compr_type;
899
900 out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
901 ubifs_compress_folio(c, folio, offset, len, &data->data, &compr_len,
902 &compr_type);
903 ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);
904
905 if (encrypted) {
906 err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
907 if (err)
908 goto out_free;
909
910 } else {
911 data->compr_size = 0;
912 out_len = compr_len;
913 }
914
915 dlen = UBIFS_DATA_NODE_SZ + out_len;
916 if (ubifs_authenticated(c))
917 write_len = ALIGN(dlen, 8) + auth_len;
918 else
919 write_len = dlen;
920
921 data->compr_type = cpu_to_le16(compr_type);
922
923 /* Make reservation before allocating sequence numbers */
924 err = make_reservation(c, DATAHD, write_len);
925 if (err)
926 goto out_free;
927
928 ubifs_prepare_node(c, data, dlen, 0);
929 err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
930 if (err)
931 goto out_release;
932
933 err = ubifs_node_calc_hash(c, data, hash);
934 if (err)
935 goto out_release;
936
937 ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
938 release_head(c, DATAHD);
939
940 ubifs_add_auth_dirt(c, lnum);
941
942 err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
943 if (err)
944 goto out_ro;
945
946 finish_reservation(c);
947 if (!allocated)
948 mutex_unlock(&c->write_reserve_mutex);
949 else
950 kfree(data);
951 return 0;
952
953 out_release:
954 release_head(c, DATAHD);
955 out_ro:
956 ubifs_ro_mode(c, err);
957 finish_reservation(c);
958 out_free:
959 if (!allocated)
960 mutex_unlock(&c->write_reserve_mutex);
961 else
962 kfree(data);
963 return err;
964 }
965
966 /**
967 * ubifs_jnl_write_inode - flush inode to the journal.
968 * @c: UBIFS file-system description object
969 * @inode: inode to flush
970 *
971 * This function writes inode @inode to the journal. If the inode is
972 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
973 * success and a negative error code in case of failure.
974 */
975 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
976 {
977 int err, lnum, offs;
978 struct ubifs_ino_node *ino, *ino_start;
979 struct ubifs_inode *ui = ubifs_inode(inode);
980 int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ;
981 int last_reference = !inode->i_nlink;
982 int kill_xattrs = ui->xattr_cnt && last_reference;
983 u8 hash[UBIFS_HASH_ARR_SZ];
984
985 dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
986
987 if (kill_xattrs && ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
988 ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
989 err = -EPERM;
990 ubifs_ro_mode(c, err);
991 return err;
992 }
993
994 /*
995 * If the inode is being deleted, do not write the attached data. No
996 * need to synchronize the write-buffer either.
997 */ 998 if (!last_reference) { 999 ilen += ui->data_len; 1000 sync = IS_SYNC(inode); 1001 } else if (kill_xattrs) { 1002 write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt; 1003 } 1004 1005 if (ubifs_authenticated(c)) 1006 write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c); 1007 else 1008 write_len += ilen; 1009 1010 ino_start = ino = kmalloc(write_len, GFP_NOFS); 1011 if (!ino) 1012 return -ENOMEM; 1013 1014 /* Make reservation before allocating sequence numbers */ 1015 err = make_reservation(c, BASEHD, write_len); 1016 if (err) 1017 goto out_free; 1018 1019 if (kill_xattrs) { 1020 union ubifs_key key; 1021 struct fscrypt_name nm = {0}; 1022 struct inode *xino; 1023 struct ubifs_dent_node *xent, *pxent = NULL; 1024 1025 lowest_xent_key(c, &key, inode->i_ino); 1026 while (1) { 1027 xent = ubifs_tnc_next_ent(c, &key, &nm); 1028 if (IS_ERR(xent)) { 1029 err = PTR_ERR(xent); 1030 if (err == -ENOENT) 1031 break; 1032 1033 kfree(pxent); 1034 goto out_release; 1035 } 1036 1037 fname_name(&nm) = xent->name; 1038 fname_len(&nm) = le16_to_cpu(xent->nlen); 1039 1040 xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum)); 1041 if (IS_ERR(xino)) { 1042 err = PTR_ERR(xino); 1043 ubifs_err(c, "dead directory entry '%s', error %d", 1044 xent->name, err); 1045 ubifs_ro_mode(c, err); 1046 kfree(pxent); 1047 kfree(xent); 1048 goto out_release; 1049 } 1050 ubifs_assert(c, ubifs_inode(xino)->xattr); 1051 1052 clear_nlink(xino); 1053 pack_inode(c, ino, xino, 0); 1054 ino = (void *)ino + UBIFS_INO_NODE_SZ; 1055 iput(xino); 1056 1057 kfree(pxent); 1058 pxent = xent; 1059 key_read(c, &xent->key, &key); 1060 } 1061 kfree(pxent); 1062 } 1063 1064 pack_inode(c, ino, inode, 1); 1065 err = ubifs_node_calc_hash(c, ino, hash); 1066 if (err) 1067 goto out_release; 1068 1069 err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync); 1070 if (err) 1071 goto out_release; 1072 if (!sync) 1073 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1074 inode->i_ino); 1075 release_head(c, BASEHD); 1076 1077 if (last_reference) { 1078 err = ubifs_tnc_remove_ino(c, inode->i_ino); 1079 if (err) 1080 goto out_ro; 1081 ubifs_delete_orphan(c, inode->i_ino); 1082 err = ubifs_add_dirt(c, lnum, write_len); 1083 } else { 1084 union ubifs_key key; 1085 1086 ubifs_add_auth_dirt(c, lnum); 1087 1088 ino_key_init(c, &key, inode->i_ino); 1089 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); 1090 } 1091 if (err) 1092 goto out_ro; 1093 1094 finish_reservation(c); 1095 spin_lock(&ui->ui_lock); 1096 ui->synced_i_size = ui->ui_size; 1097 spin_unlock(&ui->ui_lock); 1098 kfree(ino_start); 1099 return 0; 1100 1101 out_release: 1102 release_head(c, BASEHD); 1103 out_ro: 1104 ubifs_ro_mode(c, err); 1105 finish_reservation(c); 1106 out_free: 1107 kfree(ino_start); 1108 return err; 1109 } 1110 1111 /** 1112 * ubifs_jnl_delete_inode - delete an inode. 1113 * @c: UBIFS file-system description object 1114 * @inode: inode to delete 1115 * 1116 * This function deletes inode @inode which includes removing it from orphans, 1117 * deleting it from TNC and, in some cases, writing a deletion inode to the 1118 * journal. 1119 * 1120 * When regular file inodes are unlinked or a directory inode is removed, the 1121 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and 1122 * direntry to the media, and adds the inode to orphans. After this, when the 1123 * last reference to this inode has been dropped, this function is called. 
In 1124 * general, it has to write one more deletion inode to the media, because if 1125 * a commit happened between 'ubifs_jnl_update()' and 1126 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal 1127 * anymore, and in fact it might not be on the flash anymore, because it might 1128 * have been garbage-collected already. And for optimization reasons UBIFS does 1129 * not read the orphan area if it has been unmounted cleanly, so it would have 1130 * no indication in the journal that there is a deleted inode which has to be 1131 * removed from TNC. 1132 * 1133 * However, if there was no commit between 'ubifs_jnl_update()' and 1134 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion 1135 * inode to the media for the second time. And this is quite a typical case. 1136 * 1137 * This function returns zero in case of success and a negative error code in 1138 * case of failure. 1139 */ 1140 int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode) 1141 { 1142 int err; 1143 struct ubifs_inode *ui = ubifs_inode(inode); 1144 1145 ubifs_assert(c, inode->i_nlink == 0); 1146 1147 if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no) 1148 /* A commit happened for sure or inode hosts xattrs */ 1149 return ubifs_jnl_write_inode(c, inode); 1150 1151 down_read(&c->commit_sem); 1152 /* 1153 * Check commit number again, because the first test has been done 1154 * without @c->commit_sem, so a commit might have happened. 1155 */ 1156 if (ui->del_cmtno != c->cmt_no) { 1157 up_read(&c->commit_sem); 1158 return ubifs_jnl_write_inode(c, inode); 1159 } 1160 1161 err = ubifs_tnc_remove_ino(c, inode->i_ino); 1162 if (err) 1163 ubifs_ro_mode(c, err); 1164 else 1165 ubifs_delete_orphan(c, inode->i_ino); 1166 up_read(&c->commit_sem); 1167 return err; 1168 } 1169 1170 /** 1171 * ubifs_jnl_xrename - cross rename two directory entries. 1172 * @c: UBIFS file-system description object 1173 * @fst_dir: parent inode of 1st directory entry to exchange 1174 * @fst_inode: 1st inode to exchange 1175 * @fst_nm: name of 1st inode to exchange 1176 * @snd_dir: parent inode of 2nd directory entry to exchange 1177 * @snd_inode: 2nd inode to exchange 1178 * @snd_nm: name of 2nd inode to exchange 1179 * @sync: non-zero if the write-buffer has to be synchronized 1180 * 1181 * This function implements the cross rename operation which may involve 1182 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean 1183 * and returns zero on success. In case of failure, a negative error code is 1184 * returned. 
1185 */ 1186 int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, 1187 const struct inode *fst_inode, 1188 const struct fscrypt_name *fst_nm, 1189 const struct inode *snd_dir, 1190 const struct inode *snd_inode, 1191 const struct fscrypt_name *snd_nm, int sync) 1192 { 1193 union ubifs_key key; 1194 struct ubifs_dent_node *dent1, *dent2; 1195 int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ; 1196 int aligned_dlen1, aligned_dlen2; 1197 int twoparents = (fst_dir != snd_dir); 1198 void *p; 1199 u8 hash_dent1[UBIFS_HASH_ARR_SZ]; 1200 u8 hash_dent2[UBIFS_HASH_ARR_SZ]; 1201 u8 hash_p1[UBIFS_HASH_ARR_SZ]; 1202 u8 hash_p2[UBIFS_HASH_ARR_SZ]; 1203 1204 ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0); 1205 ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0); 1206 ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); 1207 ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); 1208 1209 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1; 1210 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1; 1211 aligned_dlen1 = ALIGN(dlen1, 8); 1212 aligned_dlen2 = ALIGN(dlen2, 8); 1213 1214 len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8); 1215 if (twoparents) 1216 len += plen; 1217 1218 len += ubifs_auth_node_sz(c); 1219 1220 dent1 = kzalloc(len, GFP_NOFS); 1221 if (!dent1) 1222 return -ENOMEM; 1223 1224 /* Make reservation before allocating sequence numbers */ 1225 err = make_reservation(c, BASEHD, len); 1226 if (err) 1227 goto out_free; 1228 1229 /* Make new dent for 1st entry */ 1230 dent1->ch.node_type = UBIFS_DENT_NODE; 1231 dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm); 1232 dent1->inum = cpu_to_le64(fst_inode->i_ino); 1233 dent1->type = get_dent_type(fst_inode->i_mode); 1234 dent1->nlen = cpu_to_le16(fname_len(snd_nm)); 1235 memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm)); 1236 dent1->name[fname_len(snd_nm)] = '\0'; 1237 set_dent_cookie(c, dent1); 1238 zero_dent_node_unused(dent1); 1239 ubifs_prep_grp_node(c, dent1, dlen1, 0); 1240 err = ubifs_node_calc_hash(c, dent1, hash_dent1); 1241 if (err) 1242 goto out_release; 1243 1244 /* Make new dent for 2nd entry */ 1245 dent2 = (void *)dent1 + aligned_dlen1; 1246 dent2->ch.node_type = UBIFS_DENT_NODE; 1247 dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm); 1248 dent2->inum = cpu_to_le64(snd_inode->i_ino); 1249 dent2->type = get_dent_type(snd_inode->i_mode); 1250 dent2->nlen = cpu_to_le16(fname_len(fst_nm)); 1251 memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm)); 1252 dent2->name[fname_len(fst_nm)] = '\0'; 1253 set_dent_cookie(c, dent2); 1254 zero_dent_node_unused(dent2); 1255 ubifs_prep_grp_node(c, dent2, dlen2, 0); 1256 err = ubifs_node_calc_hash(c, dent2, hash_dent2); 1257 if (err) 1258 goto out_release; 1259 1260 p = (void *)dent2 + aligned_dlen2; 1261 if (!twoparents) { 1262 pack_inode(c, p, fst_dir, 1); 1263 err = ubifs_node_calc_hash(c, p, hash_p1); 1264 if (err) 1265 goto out_release; 1266 } else { 1267 pack_inode(c, p, fst_dir, 0); 1268 err = ubifs_node_calc_hash(c, p, hash_p1); 1269 if (err) 1270 goto out_release; 1271 p += ALIGN(plen, 8); 1272 pack_inode(c, p, snd_dir, 1); 1273 err = ubifs_node_calc_hash(c, p, hash_p2); 1274 if (err) 1275 goto out_release; 1276 } 1277 1278 err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync); 1279 if (err) 1280 goto out_release; 1281 if (!sync) { 1282 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 1283 1284 ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino); 1285 ubifs_wbuf_add_ino_nolock(wbuf, 
snd_dir->i_ino);
1286 }
1287 release_head(c, BASEHD);
1288
1289 ubifs_add_auth_dirt(c, lnum);
1290
1291 dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
1292 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
1293 if (err)
1294 goto out_ro;
1295
1296 offs += aligned_dlen1;
1297 dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
1298 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
1299 if (err)
1300 goto out_ro;
1301
1302 offs += aligned_dlen2;
1303
1304 ino_key_init(c, &key, fst_dir->i_ino);
1305 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
1306 if (err)
1307 goto out_ro;
1308
1309 if (twoparents) {
1310 offs += ALIGN(plen, 8);
1311 ino_key_init(c, &key, snd_dir->i_ino);
1312 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
1313 if (err)
1314 goto out_ro;
1315 }
1316
1317 finish_reservation(c);
1318
1319 mark_inode_clean(c, ubifs_inode(fst_dir));
1320 if (twoparents)
1321 mark_inode_clean(c, ubifs_inode(snd_dir));
1322 kfree(dent1);
1323 return 0;
1324
1325 out_release:
1326 release_head(c, BASEHD);
1327 out_ro:
1328 ubifs_ro_mode(c, err);
1329 finish_reservation(c);
1330 out_free:
1331 kfree(dent1);
1332 return err;
1333 }
1334
1335 /**
1336 * ubifs_jnl_rename - rename a directory entry.
1337 * @c: UBIFS file-system description object
1338 * @old_dir: parent inode of directory entry to rename
1339 * @old_inode: directory entry's inode to rename
1340 * @old_nm: name of the old directory entry to rename
1341 * @new_dir: parent inode of directory entry to rename
1342 * @new_inode: new directory entry's inode (or directory entry's inode to
1343 * replace)
1344 * @new_nm: new name of the new directory entry
1345 * @whiteout: whiteout inode
1346 * @sync: non-zero if the write-buffer has to be synchronized
1347 * @delete_orphan: indicates an orphan entry deletion for @whiteout
1348 *
1349 * This function implements the rename operation which may involve writing up
1350 * to 4 inodes (new inode, whiteout inode, old and new parent directory inodes)
1351 * and 2 directory entries. It marks the written inodes as clean and returns
1352 * zero on success. In case of failure, a negative error code is returned.
1353 */ 1354 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, 1355 const struct inode *old_inode, 1356 const struct fscrypt_name *old_nm, 1357 const struct inode *new_dir, 1358 const struct inode *new_inode, 1359 const struct fscrypt_name *new_nm, 1360 const struct inode *whiteout, int sync, int delete_orphan) 1361 { 1362 void *p; 1363 union ubifs_key key; 1364 struct ubifs_dent_node *dent, *dent2; 1365 int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0; 1366 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; 1367 int last_reference = !!(new_inode && new_inode->i_nlink == 0); 1368 int move = (old_dir != new_dir); 1369 struct ubifs_inode *new_ui, *whiteout_ui; 1370 u8 hash_old_dir[UBIFS_HASH_ARR_SZ]; 1371 u8 hash_new_dir[UBIFS_HASH_ARR_SZ]; 1372 u8 hash_new_inode[UBIFS_HASH_ARR_SZ]; 1373 u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ]; 1374 u8 hash_dent1[UBIFS_HASH_ARR_SZ]; 1375 u8 hash_dent2[UBIFS_HASH_ARR_SZ]; 1376 1377 ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0); 1378 ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0); 1379 ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex)); 1380 ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex)); 1381 1382 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1; 1383 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1; 1384 if (new_inode) { 1385 new_ui = ubifs_inode(new_inode); 1386 ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex)); 1387 ilen = UBIFS_INO_NODE_SZ; 1388 if (!last_reference) 1389 ilen += new_ui->data_len; 1390 } else 1391 ilen = 0; 1392 1393 if (whiteout) { 1394 whiteout_ui = ubifs_inode(whiteout); 1395 ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex)); 1396 ubifs_assert(c, whiteout->i_nlink == 1); 1397 ubifs_assert(c, !whiteout_ui->dirty); 1398 wlen = UBIFS_INO_NODE_SZ; 1399 wlen += whiteout_ui->data_len; 1400 } else 1401 wlen = 0; 1402 1403 aligned_dlen1 = ALIGN(dlen1, 8); 1404 aligned_dlen2 = ALIGN(dlen2, 8); 1405 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + 1406 ALIGN(wlen, 8) + ALIGN(plen, 8); 1407 if (move) 1408 len += plen; 1409 1410 len += ubifs_auth_node_sz(c); 1411 1412 dent = kzalloc(len, GFP_NOFS); 1413 if (!dent) 1414 return -ENOMEM; 1415 1416 /* Make reservation before allocating sequence numbers */ 1417 err = make_reservation(c, BASEHD, len); 1418 if (err) 1419 goto out_free; 1420 1421 /* Make new dent */ 1422 dent->ch.node_type = UBIFS_DENT_NODE; 1423 dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm); 1424 dent->inum = cpu_to_le64(old_inode->i_ino); 1425 dent->type = get_dent_type(old_inode->i_mode); 1426 dent->nlen = cpu_to_le16(fname_len(new_nm)); 1427 memcpy(dent->name, fname_name(new_nm), fname_len(new_nm)); 1428 dent->name[fname_len(new_nm)] = '\0'; 1429 set_dent_cookie(c, dent); 1430 zero_dent_node_unused(dent); 1431 ubifs_prep_grp_node(c, dent, dlen1, 0); 1432 err = ubifs_node_calc_hash(c, dent, hash_dent1); 1433 if (err) 1434 goto out_release; 1435 1436 dent2 = (void *)dent + aligned_dlen1; 1437 dent2->ch.node_type = UBIFS_DENT_NODE; 1438 dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm); 1439 1440 if (whiteout) { 1441 dent2->inum = cpu_to_le64(whiteout->i_ino); 1442 dent2->type = get_dent_type(whiteout->i_mode); 1443 } else { 1444 /* Make deletion dent */ 1445 dent2->inum = 0; 1446 dent2->type = DT_UNKNOWN; 1447 } 1448 dent2->nlen = cpu_to_le16(fname_len(old_nm)); 1449 memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm)); 1450 dent2->name[fname_len(old_nm)] = '\0'; 1451 set_dent_cookie(c, 
dent2); 1452 zero_dent_node_unused(dent2); 1453 ubifs_prep_grp_node(c, dent2, dlen2, 0); 1454 err = ubifs_node_calc_hash(c, dent2, hash_dent2); 1455 if (err) 1456 goto out_release; 1457 1458 p = (void *)dent2 + aligned_dlen2; 1459 if (new_inode) { 1460 pack_inode(c, p, new_inode, 0); 1461 err = ubifs_node_calc_hash(c, p, hash_new_inode); 1462 if (err) 1463 goto out_release; 1464 1465 p += ALIGN(ilen, 8); 1466 } 1467 1468 if (whiteout) { 1469 pack_inode(c, p, whiteout, 0); 1470 err = ubifs_node_calc_hash(c, p, hash_whiteout_inode); 1471 if (err) 1472 goto out_release; 1473 1474 p += ALIGN(wlen, 8); 1475 } 1476 1477 if (!move) { 1478 pack_inode(c, p, old_dir, 1); 1479 err = ubifs_node_calc_hash(c, p, hash_old_dir); 1480 if (err) 1481 goto out_release; 1482 } else { 1483 pack_inode(c, p, old_dir, 0); 1484 err = ubifs_node_calc_hash(c, p, hash_old_dir); 1485 if (err) 1486 goto out_release; 1487 1488 p += ALIGN(plen, 8); 1489 pack_inode(c, p, new_dir, 1); 1490 err = ubifs_node_calc_hash(c, p, hash_new_dir); 1491 if (err) 1492 goto out_release; 1493 } 1494 1495 if (last_reference) { 1496 err = ubifs_add_orphan(c, new_inode->i_ino); 1497 if (err) { 1498 release_head(c, BASEHD); 1499 goto out_finish; 1500 } 1501 new_ui->del_cmtno = c->cmt_no; 1502 orphan_added = 1; 1503 } 1504 1505 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); 1506 if (err) 1507 goto out_release; 1508 if (!sync) { 1509 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; 1510 1511 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino); 1512 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino); 1513 if (new_inode) 1514 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1515 new_inode->i_ino); 1516 if (whiteout) 1517 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, 1518 whiteout->i_ino); 1519 } 1520 release_head(c, BASEHD); 1521 1522 ubifs_add_auth_dirt(c, lnum); 1523 1524 dent_key_init(c, &key, new_dir->i_ino, new_nm); 1525 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm); 1526 if (err) 1527 goto out_ro; 1528 1529 offs += aligned_dlen1; 1530 if (whiteout) { 1531 dent_key_init(c, &key, old_dir->i_ino, old_nm); 1532 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm); 1533 if (err) 1534 goto out_ro; 1535 } else { 1536 err = ubifs_add_dirt(c, lnum, dlen2); 1537 if (err) 1538 goto out_ro; 1539 1540 dent_key_init(c, &key, old_dir->i_ino, old_nm); 1541 err = ubifs_tnc_remove_nm(c, &key, old_nm); 1542 if (err) 1543 goto out_ro; 1544 } 1545 1546 offs += aligned_dlen2; 1547 if (new_inode) { 1548 ino_key_init(c, &key, new_inode->i_ino); 1549 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode); 1550 if (err) 1551 goto out_ro; 1552 offs += ALIGN(ilen, 8); 1553 } 1554 1555 if (whiteout) { 1556 ino_key_init(c, &key, whiteout->i_ino); 1557 err = ubifs_tnc_add(c, &key, lnum, offs, wlen, 1558 hash_whiteout_inode); 1559 if (err) 1560 goto out_ro; 1561 offs += ALIGN(wlen, 8); 1562 } 1563 1564 ino_key_init(c, &key, old_dir->i_ino); 1565 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir); 1566 if (err) 1567 goto out_ro; 1568 1569 if (move) { 1570 offs += ALIGN(plen, 8); 1571 ino_key_init(c, &key, new_dir->i_ino); 1572 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir); 1573 if (err) 1574 goto out_ro; 1575 } 1576 1577 if (delete_orphan) 1578 ubifs_delete_orphan(c, whiteout->i_ino); 1579 1580 finish_reservation(c); 1581 if (new_inode) { 1582 mark_inode_clean(c, new_ui); 1583 spin_lock(&new_ui->ui_lock); 1584 new_ui->synced_i_size = new_ui->ui_size; 1585 
spin_unlock(&new_ui->ui_lock); 1586 } 1587 /* 1588 * No need to mark whiteout inode clean. 1589 * Whiteout doesn't have non-zero size, no need to update 1590 * synced_i_size for whiteout_ui. 1591 */ 1592 mark_inode_clean(c, ubifs_inode(old_dir)); 1593 if (move) 1594 mark_inode_clean(c, ubifs_inode(new_dir)); 1595 kfree(dent); 1596 return 0; 1597 1598 out_release: 1599 release_head(c, BASEHD); 1600 out_ro: 1601 ubifs_ro_mode(c, err); 1602 if (orphan_added) 1603 ubifs_delete_orphan(c, new_inode->i_ino); 1604 out_finish: 1605 finish_reservation(c); 1606 out_free: 1607 kfree(dent); 1608 return err; 1609 } 1610 1611 /** 1612 * truncate_data_node - re-compress/encrypt a truncated data node. 1613 * @c: UBIFS file-system description object 1614 * @inode: inode which refers to the data node 1615 * @block: data block number 1616 * @dn: data node to re-compress 1617 * @new_len: new length 1618 * @dn_size: size of the data node @dn in memory 1619 * 1620 * This function is used when an inode is truncated and the last data node of 1621 * the inode has to be re-compressed/encrypted and re-written. 1622 */ 1623 static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode, 1624 unsigned int block, struct ubifs_data_node *dn, 1625 int *new_len, int dn_size) 1626 { 1627 void *buf; 1628 int err, dlen, compr_type, out_len, data_size; 1629 1630 out_len = le32_to_cpu(dn->size); 1631 buf = kmalloc(out_len, GFP_NOFS); 1632 if (!buf) 1633 return -ENOMEM; 1634 1635 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; 1636 data_size = dn_size - UBIFS_DATA_NODE_SZ; 1637 compr_type = le16_to_cpu(dn->compr_type); 1638 1639 if (IS_ENCRYPTED(inode)) { 1640 err = ubifs_decrypt(inode, dn, &dlen, block); 1641 if (err) 1642 goto out; 1643 } 1644 1645 if (compr_type == UBIFS_COMPR_NONE) { 1646 out_len = *new_len; 1647 } else { 1648 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type); 1649 if (err) 1650 goto out; 1651 1652 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type); 1653 } 1654 1655 if (IS_ENCRYPTED(inode)) { 1656 err = ubifs_encrypt(inode, dn, out_len, &data_size, block); 1657 if (err) 1658 goto out; 1659 1660 out_len = data_size; 1661 } else { 1662 dn->compr_size = 0; 1663 } 1664 1665 ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE); 1666 dn->compr_type = cpu_to_le16(compr_type); 1667 dn->size = cpu_to_le32(*new_len); 1668 *new_len = UBIFS_DATA_NODE_SZ + out_len; 1669 err = 0; 1670 out: 1671 kfree(buf); 1672 return err; 1673 } 1674 1675 /** 1676 * ubifs_jnl_truncate - update the journal for a truncation. 1677 * @c: UBIFS file-system description object 1678 * @inode: inode to truncate 1679 * @old_size: old size 1680 * @new_size: new size 1681 * 1682 * When the size of a file decreases due to truncation, a truncation node is 1683 * written, the journal tree is updated, and the last data block is re-written 1684 * if it has been affected. The inode is also updated in order to synchronize 1685 * the new inode size. 1686 * 1687 * This function marks the inode as clean and returns zero on success. In case 1688 * of failure, a negative error code is returned. 
1689 */ 1690 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, 1691 loff_t old_size, loff_t new_size) 1692 { 1693 union ubifs_key key, to_key; 1694 struct ubifs_ino_node *ino; 1695 struct ubifs_trun_node *trun; 1696 struct ubifs_data_node *dn; 1697 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode); 1698 int dn_size; 1699 struct ubifs_inode *ui = ubifs_inode(inode); 1700 ino_t inum = inode->i_ino; 1701 unsigned int blk; 1702 u8 hash_ino[UBIFS_HASH_ARR_SZ]; 1703 u8 hash_dn[UBIFS_HASH_ARR_SZ]; 1704 1705 dbg_jnl("ino %lu, size %lld -> %lld", 1706 (unsigned long)inum, old_size, new_size); 1707 ubifs_assert(c, !ui->data_len); 1708 ubifs_assert(c, S_ISREG(inode->i_mode)); 1709 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 1710 1711 dn_size = COMPRESSED_DATA_NODE_BUF_SZ; 1712 1713 if (IS_ENCRYPTED(inode)) 1714 dn_size += UBIFS_CIPHER_BLOCK_SIZE; 1715 1716 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ + 1717 dn_size + ubifs_auth_node_sz(c); 1718 1719 ino = kmalloc(sz, GFP_NOFS); 1720 if (!ino) 1721 return -ENOMEM; 1722 1723 trun = (void *)ino + UBIFS_INO_NODE_SZ; 1724 trun->ch.node_type = UBIFS_TRUN_NODE; 1725 trun->inum = cpu_to_le32(inum); 1726 trun->old_size = cpu_to_le64(old_size); 1727 trun->new_size = cpu_to_le64(new_size); 1728 zero_trun_node_unused(trun); 1729 1730 dlen = new_size & (UBIFS_BLOCK_SIZE - 1); 1731 if (dlen) { 1732 /* Get last data block so it can be truncated */ 1733 dn = (void *)trun + UBIFS_TRUN_NODE_SZ; 1734 blk = new_size >> UBIFS_BLOCK_SHIFT; 1735 data_key_init(c, &key, inum, blk); 1736 dbg_jnlk(&key, "last block key "); 1737 err = ubifs_tnc_lookup(c, &key, dn); 1738 if (err == -ENOENT) 1739 dlen = 0; /* Not found (so it is a hole) */ 1740 else if (err) 1741 goto out_free; 1742 else { 1743 int dn_len = le32_to_cpu(dn->size); 1744 1745 if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) { 1746 ubifs_err(c, "bad data node (block %u, inode %lu)", 1747 blk, inode->i_ino); 1748 ubifs_dump_node(c, dn, dn_size); 1749 err = -EUCLEAN; 1750 goto out_free; 1751 } 1752 1753 if (dn_len <= dlen) 1754 dlen = 0; /* Nothing to do */ 1755 else { 1756 err = truncate_data_node(c, inode, blk, dn, 1757 &dlen, dn_size); 1758 if (err) 1759 goto out_free; 1760 } 1761 } 1762 } 1763 1764 /* Must make reservation before allocating sequence numbers */ 1765 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ; 1766 1767 if (ubifs_authenticated(c)) 1768 len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c); 1769 else 1770 len += dlen; 1771 1772 err = make_reservation(c, BASEHD, len); 1773 if (err) 1774 goto out_free; 1775 1776 pack_inode(c, ino, inode, 0); 1777 err = ubifs_node_calc_hash(c, ino, hash_ino); 1778 if (err) 1779 goto out_release; 1780 1781 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 
0 : 1);
1782 if (dlen) {
1783 ubifs_prep_grp_node(c, dn, dlen, 1);
1784 err = ubifs_node_calc_hash(c, dn, hash_dn);
1785 if (err)
1786 goto out_release;
1787 }
1788
1789 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1790 if (err)
1791 goto out_release;
1792 if (!sync)
1793 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1794 release_head(c, BASEHD);
1795
1796 ubifs_add_auth_dirt(c, lnum);
1797
1798 if (dlen) {
1799 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
1800 err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
1801 if (err)
1802 goto out_ro;
1803 }
1804
1805 ino_key_init(c, &key, inum);
1806 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
1807 if (err)
1808 goto out_ro;
1809
1810 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1811 if (err)
1812 goto out_ro;
1813
1814 bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1815 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1816 data_key_init(c, &key, inum, blk);
1817
1818 bit = old_size & (UBIFS_BLOCK_SIZE - 1);
1819 blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
1820 data_key_init(c, &to_key, inum, blk);
1821
1822 err = ubifs_tnc_remove_range(c, &key, &to_key);
1823 if (err)
1824 goto out_ro;
1825
1826 finish_reservation(c);
1827 spin_lock(&ui->ui_lock);
1828 ui->synced_i_size = ui->ui_size;
1829 spin_unlock(&ui->ui_lock);
1830 mark_inode_clean(c, ui);
1831 kfree(ino);
1832 return 0;
1833
1834 out_release:
1835 release_head(c, BASEHD);
1836 out_ro:
1837 ubifs_ro_mode(c, err);
1838 finish_reservation(c);
1839 out_free:
1840 kfree(ino);
1841 return err;
1842 }
1843
1844
1845 /**
1846 * ubifs_jnl_delete_xattr - delete an extended attribute.
1847 * @c: UBIFS file-system description object
1848 * @host: host inode
1849 * @inode: extended attribute inode
1850 * @nm: extended attribute entry name
1851 *
1852 * This function deletes an extended attribute, which is very similar to
1853 * un-linking regular files - it writes a deletion xentry, a deletion inode and
1854 * updates the target inode. Returns zero in case of success and a negative
1855 * error code in case of failure.
1856 */
1857 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
1858 const struct inode *inode,
1859 const struct fscrypt_name *nm)
1860 {
1861 int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len;
1862 struct ubifs_dent_node *xent;
1863 struct ubifs_ino_node *ino;
1864 union ubifs_key xent_key, key1, key2;
1865 int sync = IS_DIRSYNC(host);
1866 struct ubifs_inode *host_ui = ubifs_inode(host);
1867 u8 hash[UBIFS_HASH_ARR_SZ];
1868
1869 ubifs_assert(c, inode->i_nlink == 0);
1870 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
1871
1872 /*
1873 * Since we are deleting the inode, we do not bother to attach any data
1874 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	write_len = len + ubifs_auth_node_sz(c);

	xent = kzalloc(write_len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, write_len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);
	err = ubifs_node_calc_hash(c, ino, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_release:
	kfree(xent);
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head). The host inode is written
 * after the extended attribute inode in order to guarantee that the extended
 * attribute will be flushed when the inode is synchronized by 'fsync()' and
 * consequently, the write-buffer is synchronized. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);
	u8 hash_host[UBIFS_HASH_ARR_SZ];
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(c, inode->i_nlink > 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	aligned_len += ubifs_auth_node_sz(c);

	ino = kzalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	err = ubifs_node_calc_hash(c, ino, hash_host);
	if (err)
		goto out_release;
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);
	err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ubifs_add_auth_dirt(c, lnum);

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}