// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements the UBIFS journal.
 *
 * The journal consists of 2 parts - the log and bud LEBs. The log has a fixed
 * length and position, while a bud logical eraseblock is any LEB in the main
 * area. Buds contain file system data - data nodes, inode nodes, etc. The log
 * contains only references to buds and some other stuff like the commit
 * start node. The idea is that when we commit the journal, we do not copy the
 * data, the buds just become indexed. Since after the commit the nodes in bud
 * eraseblocks become leaf nodes of the file system index tree, we use the
 * term "bud". The analogy is obvious: bud eraseblocks contain nodes which
 * will become leaves in the future.
 *
 * The journal is multi-headed because we want to write data to the journal as
 * optimally as possible. It is nice to have nodes belonging to the same inode
 * in one LEB, so we may write data owned by different inodes to different
 * journal heads, although at present only one data head is used.
 *
 * For recovery reasons, the base head contains all inode nodes, all directory
 * entry nodes and all truncate nodes. This means that the other heads contain
 * only data nodes.
 *
 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
 * time of commit, the bud is retained to continue to be used in the journal,
 * even though the "front" of the LEB is now indexed. In that case, the log
 * reference contains the offset where the bud starts for the purposes of the
 * journal.
 *
 * The journal size has to be limited, because the larger the journal, the
 * longer it takes to mount UBIFS (scanning the journal) and the more memory
 * it takes (indexing in the TNC).
 *
 * All the journal write operations like 'ubifs_jnl_update()' here, which
 * write multiple UBIFS nodes to the journal at one go, are atomic with
 * respect to unclean reboots. Should an unclean reboot happen, the recovery
 * code drops all these nodes.
 */

#include "ubifs.h"

/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode to zero out
 */
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
        memset(ino->padding1, 0, 4);
        memset(ino->padding2, 0, 26);
}

/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 *                         entry node.
 * @dent: the directory entry to zero out
 */
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
        dent->padding1 = 0;
}

/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 *                         node.
 * @trun: the truncation node to zero out
 */
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
        memset(trun->padding, 0, 12);
}

static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
{
        if (ubifs_authenticated(c))
                ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
}

/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @jhead. If the reservation
 * succeeded, the journal head stays locked and later has to be unlocked using
 * 'release_head()'. Returns zero in case of success, %-EAGAIN if a commit has
 * to be done, and other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
        int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
        struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

        /*
         * Typically, the base head has smaller nodes written to it, so it is
         * better to try to allocate space at the ends of eraseblocks. This is
         * what the squeeze parameter does.
         */
        ubifs_assert(c, !c->ro_media && !c->ro_mount);
        squeeze = (jhead == BASEHD);
again:
        mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

        if (c->ro_error) {
                err = -EROFS;
                goto out_unlock;
        }

        avail = c->leb_size - wbuf->offs - wbuf->used;
        if (wbuf->lnum != -1 && avail >= len)
                return 0;

        /*
         * The write-buffer wasn't seek'ed or there is not enough space - look
         * for an LEB with some empty space.
         */
        lnum = ubifs_find_free_space(c, len, &offs, squeeze);
        if (lnum >= 0)
                goto out;

        err = lnum;
        if (err != -ENOSPC)
                goto out_unlock;

        /*
         * No free space, we have to run the garbage collector to make
         * some. But the write-buffer mutex has to be unlocked because
         * GC also takes it.
         */
        dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
        mutex_unlock(&wbuf->io_mutex);

        lnum = ubifs_garbage_collect(c, 0);
        if (lnum < 0) {
                err = lnum;
                if (err != -ENOSPC)
                        return err;

                /*
                 * GC could not make a free LEB. But someone else may
                 * have allocated a new bud for this journal head,
                 * because we dropped @wbuf->io_mutex, so try once
                 * again.
                 */
                dbg_jnl("GC couldn't make a free LEB for jhead %s",
                        dbg_jhead(jhead));
                if (retries++ < 2) {
                        dbg_jnl("retry (%d)", retries);
                        goto again;
                }

                dbg_jnl("return -ENOSPC");
                return err;
        }

        mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
        dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
        avail = c->leb_size - wbuf->offs - wbuf->used;

        if (wbuf->lnum != -1 && avail >= len) {
                /*
                 * Someone else has switched the journal head and we have
                 * enough space now. This happens when more than one process
                 * is trying to write to the same journal head at the same
                 * time.
                 */
                dbg_jnl("return LEB %d back, already have LEB %d:%d",
                        lnum, wbuf->lnum, wbuf->offs + wbuf->used);
                err = ubifs_return_leb(c, lnum);
                if (err)
                        goto out_unlock;
                return 0;
        }

        offs = 0;

out:
        /*
         * Make sure we synchronize the write-buffer before we add the new bud
         * to the log. Otherwise we may have a power cut after the log
         * reference node for the last bud (@lnum) is written but before the
         * write-buffer data are written to the next-to-last bud
         * (@wbuf->lnum). And the effect would be that the recovery would see
         * that there is corruption in the next-to-last bud.
         */
        err = ubifs_wbuf_sync_nolock(wbuf);
        if (err)
                goto out_return;
        err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
        if (err)
                goto out_return;
        err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
        if (err)
                goto out_unlock;

        return 0;

out_unlock:
        mutex_unlock(&wbuf->io_mutex);
        return err;

out_return:
        /* An error occurred and the LEB has to be returned to lprops */
        ubifs_assert(c, err < 0);
        err1 = ubifs_return_leb(c, lnum);
        if (err1 && err == -EAGAIN)
                /*
                 * Return the original error code only if it is not %-EAGAIN,
                 * which is not really an error. Otherwise, return the error
                 * code of 'ubifs_return_leb()'.
                 */
                err = err1;
        mutex_unlock(&wbuf->io_mutex);
        return err;
}
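
/*
 * Roughly, the flow of 'reserve_space()' above is the following (a simplified
 * reading aid only; error handling is omitted):
 *
 *      lock the head's wbuf->io_mutex;
 *      if (the current bud already has >= @len bytes free)
 *              return 0;                       // head stays locked
 *      lnum = ubifs_find_free_space(c, len, &offs, squeeze);
 *      if (lnum == -ENOSPC)
 *              unlock, run ubifs_garbage_collect(), re-lock and retry
 *              (at most a couple of times);
 *      sync the write-buffer, ubifs_add_bud_to_log(), then seek the
 *      write-buffer to the new bud at lnum:offs;
 */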

static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
                            int len, struct shash_desc *hash)
{
        int auth_node_size = ubifs_auth_node_sz(c);
        int err;

        while (1) {
                const struct ubifs_ch *ch = node;
                int nodelen = le32_to_cpu(ch->len);

                ubifs_assert(c, len >= auth_node_size);

                if (len == auth_node_size)
                        break;

                ubifs_assert(c, len > nodelen);
                ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));

                err = ubifs_shash_update(c, hash, (void *)node, nodelen);
                if (err)
                        return err;

                node += ALIGN(nodelen, 8);
                len -= ALIGN(nodelen, 8);
        }

        return ubifs_prepare_auth_node(c, node, hash);
}

/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function writes data to the reserved space of journal head @jhead.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
                      int *lnum, int *offs, int sync)
{
        int err;
        struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

        ubifs_assert(c, jhead != GCHD);

        *lnum = c->jheads[jhead].wbuf.lnum;
        *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
        dbg_jnl("jhead %s, LEB %d:%d, len %d",
                dbg_jhead(jhead), *lnum, *offs, len);

        if (ubifs_authenticated(c)) {
                err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
                if (err)
                        return err;
        }

        err = ubifs_wbuf_write_nolock(wbuf, buf, len);
        if (err)
                return err;
        if (sync)
                err = ubifs_wbuf_sync_nolock(wbuf);
        return err;
}
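
/*
 * A note on authentication (a summary of the callers below, not a new rule):
 * when UBIFS authentication is enabled, every journal group reserves room for
 * one authentication node at its end, so the callers size their buffers
 * roughly as
 *
 *      len = ALIGN(dlen, 8) + ALIGN(ilen, 8) + ...;
 *      if (ubifs_authenticated(c))
 *              len += ubifs_auth_node_sz(c);
 *
 * 'write_head()' then hashes each node via 'ubifs_hash_nodes()', which also
 * prepares the authentication node in place, and after the write the callers
 * account that node as dirty space with 'ubifs_add_auth_dirt()'.
 */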

/**
 * __queue_and_wait - queue a task and wait until the task is woken up.
 * @c: UBIFS file-system description object
 *
 * This function adds the current task to the wait queue and waits until it is
 * woken up. It should be called with @c->reserve_space_wq locked.
 */
static void __queue_and_wait(struct ubifs_info *c)
{
        DEFINE_WAIT(wait);

        __add_wait_queue_entry_tail_exclusive(&c->reserve_space_wq, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        spin_unlock(&c->reserve_space_wq.lock);

        schedule();
        finish_wait(&c->reserve_space_wq, &wait);
}

/**
 * wait_for_reservation - try queuing the current task to wait until woken up.
 * @c: UBIFS file-system description object
 *
 * This function queues the current task to wait until it is woken up, but
 * only if queuing has started (@c->need_wait_space is not %0). Returns %true
 * if the current task was added to the queue, otherwise %false is returned.
 */
static bool wait_for_reservation(struct ubifs_info *c)
{
        if (likely(atomic_read(&c->need_wait_space) == 0))
                /* Quick path to check whether queuing has started. */
                return false;

        spin_lock(&c->reserve_space_wq.lock);
        if (atomic_read(&c->need_wait_space) == 0) {
                /* Queuing has not started, don't queue the current task. */
                spin_unlock(&c->reserve_space_wq.lock);
                return false;
        }

        __queue_and_wait(c);
        return true;
}

/**
 * wake_up_reservation - wake up the first task in the queue or stop queuing.
 * @c: UBIFS file-system description object
 *
 * This function wakes up the first task in the queue if it exists, or stops
 * queuing if the queue is empty.
 */
static void wake_up_reservation(struct ubifs_info *c)
{
        spin_lock(&c->reserve_space_wq.lock);
        if (waitqueue_active(&c->reserve_space_wq))
                wake_up_locked(&c->reserve_space_wq);
        else
                /*
                 * Compared with wait_for_reservation(), set
                 * @c->need_wait_space under the protection of the wait queue
                 * lock, which avoids @c->need_wait_space being set to 0 after
                 * a new task has been queued.
                 */
                atomic_set(&c->need_wait_space, 0);
        spin_unlock(&c->reserve_space_wq.lock);
}

/**
 * add_or_start_queue - add the current task to the queue or start queuing.
 * @c: UBIFS file-system description object
 *
 * This function starts queuing if queuing has not started yet, otherwise it
 * adds the current task to the queue.
 */
static void add_or_start_queue(struct ubifs_info *c)
{
        spin_lock(&c->reserve_space_wq.lock);
        if (atomic_cmpxchg(&c->need_wait_space, 0, 1) == 0) {
                /* Queuing has started, the current task can go on directly. */
                spin_unlock(&c->reserve_space_wq.lock);
                return;
        }

        /*
         * At least two tasks have retried more than 32 times at this point.
         * The first task has started queuing, so just queue the remaining
         * tasks.
         */
        __queue_and_wait(c);
}

/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes a space reservation in journal head @jhead. The
 * function takes the commit lock and locks the journal head, and the caller
 * has to unlock the head and finish the reservation with
 * 'finish_reservation()'. Returns zero in case of success and a negative
 * error code in case of failure.
 *
 * Note, the journal head may be unlocked as soon as the data is written,
 * while the commit lock has to be released after the data has been added to
 * the TNC.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
        int err, cmt_retries = 0, nospc_retries = 0;
        bool blocked = wait_for_reservation(c);

again:
        down_read(&c->commit_sem);
        err = reserve_space(c, jhead, len);
        if (!err) {
                /* c->commit_sem will get released via finish_reservation(). */
                goto out_wake_up;
        }
        up_read(&c->commit_sem);

        if (err == -ENOSPC) {
                /*
                 * GC could not make any progress. We should try to commit
                 * because it could make some dirty space, which would let GC
                 * make progress, so make the error -EAGAIN so that the code
                 * below will commit and re-try.
                 */
                nospc_retries++;
                dbg_jnl("no space, retry");
                err = -EAGAIN;
        }

        if (err != -EAGAIN)
                goto out;

        /*
         * -EAGAIN means that the journal is full or too large, or the above
         * code wants to do one commit. Do this and re-try.
         */
        if (cmt_retries > 128) {
                /*
                 * This should not happen unless:
                 * 1. The journal size limitations are too tough.
                 * 2. The budgeting is incorrect. We always have to be able to
                 *    write to the media, because all operations are budgeted.
                 *    Deletions are not budgeted, though, but we reserve an
                 *    extra LEB for them.
                 */
                ubifs_err(c, "stuck in space allocation, nospc_retries %d",
                          nospc_retries);
                err = -ENOSPC;
                goto out;
        } else if (cmt_retries > 32) {
                /*
                 * It is almost impossible for this to happen, unless many
                 * tasks are making reservations concurrently and some task
                 * has retried gc + commit many times while the space made
                 * available during this period was grabbed by other tasks.
                 * But if it happens, start queuing up all tasks that will
                 * make space reservations, so that there is only one task
                 * making a space reservation at any time, and it can always
                 * succeed under the premise of correct budgeting.
                 */
                ubifs_warn(c, "too many space allocation cmt_retries (%d) "
                           "nospc_retries (%d), start queuing tasks",
                           cmt_retries, nospc_retries);

                if (!blocked) {
                        blocked = true;
                        add_or_start_queue(c);
                }
        }

        dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
                cmt_retries);
        cmt_retries += 1;

        err = ubifs_run_commit(c);
        if (err)
                goto out_wake_up;
        goto again;

out:
        ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
                  len, jhead, err);
        if (err == -ENOSPC) {
                /* These are budgeting problems, print useful information */
                down_write(&c->commit_sem);
                dump_stack();
                ubifs_dump_budg(c, &c->bi);
                ubifs_dump_lprops(c);
                cmt_retries = dbg_check_lprops(c);
                up_write(&c->commit_sem);
        }
out_wake_up:
        if (blocked) {
                /*
                 * Only tasks that have ever started queuing or ever been
                 * queued can wake up other queued tasks, which makes sure
                 * that there is only one task woken up to make a space
                 * reservation. For example:
                 *
                 *   task A               task B                task C
                 *                        make_reservation      make_reservation
                 *                        reserve_space // 0
                 *   wake_up_reservation
                 *                        atomic_cmpxchg // 0, start queuing
                 *                        reserve_space
                 *                                              wait_for_reservation
                 *                                               __queue_and_wait
                 *                                                add_wait_queue
                 *                        if (blocked) // false
                 *                        // So that task C won't be woken up
                 *                        // to race with task B
                 */
                wake_up_reservation(c);
        }
        return err;
}

/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each
 * successful 'make_reservation()' invocation.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
        mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}

/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes a journal space reservation. It must be called
 * after 'make_reservation()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
        up_read(&c->commit_sem);
}
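
/*
 * Roughly, every 'ubifs_jnl_*()' operation below follows the same pattern
 * (an illustrative sketch only; error handling and authentication details
 * are omitted):
 *
 *      err = make_reservation(c, BASEHD, len); // commit_sem + head lock
 *      pack all nodes into one contiguous buffer (pack_inode(), dent
 *      nodes, ...);
 *      err = write_head(c, BASEHD, buf, len, &lnum, &offs, sync);
 *      release_head(c, BASEHD);                // unlock the journal head
 *      err = ubifs_tnc_add*(c, ..., lnum, offs, ...); // index the new nodes
 *      finish_reservation(c);                  // drop commit_sem
 *
 * If a write fails after the reservation was made, the code releases the head
 * and switches the file-system to read-only mode via 'ubifs_ro_mode()'.
 */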

/**
 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
 * @mode: inode mode
 */
static int get_dent_type(int mode)
{
        switch (mode & S_IFMT) {
        case S_IFREG:
                return UBIFS_ITYPE_REG;
        case S_IFDIR:
                return UBIFS_ITYPE_DIR;
        case S_IFLNK:
                return UBIFS_ITYPE_LNK;
        case S_IFBLK:
                return UBIFS_ITYPE_BLK;
        case S_IFCHR:
                return UBIFS_ITYPE_CHR;
        case S_IFIFO:
                return UBIFS_ITYPE_FIFO;
        case S_IFSOCK:
                return UBIFS_ITYPE_SOCK;
        default:
                BUG();
        }
        return 0;
}

/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
                       const struct inode *inode, int last)
{
        int data_len = 0, last_reference = !inode->i_nlink;
        struct ubifs_inode *ui = ubifs_inode(inode);

        ino->ch.node_type = UBIFS_INO_NODE;
        ino_key_init_flash(c, &ino->key, inode->i_ino);
        ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
        ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode));
        ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
        ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode));
        ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
        ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode));
        ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
        ino->uid = cpu_to_le32(i_uid_read(inode));
        ino->gid = cpu_to_le32(i_gid_read(inode));
        ino->mode = cpu_to_le32(inode->i_mode);
        ino->flags = cpu_to_le32(ui->flags);
        ino->size = cpu_to_le64(ui->ui_size);
        ino->nlink = cpu_to_le32(inode->i_nlink);
        ino->compr_type = cpu_to_le16(ui->compr_type);
        ino->data_len = cpu_to_le32(ui->data_len);
        ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
        ino->xattr_size = cpu_to_le32(ui->xattr_size);
        ino->xattr_names = cpu_to_le32(ui->xattr_names);
        zero_ino_node_unused(ino);

        /*
         * Drop the attached data if this is a deletion inode, the data is not
         * needed anymore.
         */
        if (!last_reference) {
                memcpy(ino->data, ui->data, ui->data_len);
                data_len = ui->data_len;
        }

        ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}

/**
 * mark_inode_clean - mark a UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper function marks UBIFS inode @ui as clean by cleaning the
 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
 * just do nothing.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
        if (ui->dirty)
                ubifs_release_dirty_inode_budget(c, ui);
        ui->dirty = 0;
}

static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
        if (c->double_hash)
                dent->cookie = (__force __le32) get_random_u32();
        else
                dent->cookie = 0;
}

/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The function writes the host inode @dir last, which is important in case of
 * extended attributes. Indeed, then we guarantee that if the host inode gets
 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
 * the extended attribute inode gets flushed too. And this is exactly what the
 * user expects - synchronizing the host inode synchronizes its extended
 * attributes. Similarly, this guarantees that if @dir is synchronized, its
 * directory entry corresponding to @nm gets synchronized too.
 *
 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks the @dir and @inode inodes as clean and returns zero on
 * success. In case of failure, a negative error code is returned.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
                     const struct fscrypt_name *nm, const struct inode *inode,
                     int deletion, int xent)
{
        int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
        int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
        int last_reference = !!(deletion && inode->i_nlink == 0);
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_inode *host_ui = ubifs_inode(dir);
        struct ubifs_dent_node *dent;
        struct ubifs_ino_node *ino;
        union ubifs_key dent_key, ino_key;
        u8 hash_dent[UBIFS_HASH_ARR_SZ];
        u8 hash_ino[UBIFS_HASH_ARR_SZ];
        u8 hash_ino_host[UBIFS_HASH_ARR_SZ];

        ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

        dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
        ilen = UBIFS_INO_NODE_SZ;

        /*
         * If the last reference to the inode is being deleted, then there is
         * no need to attach and write inode data, it is being deleted anyway.
         * And if the inode is being deleted, there is no need to synchronize
         * the write-buffer even if the inode is synchronous.
         */
        if (!last_reference) {
                ilen += ui->data_len;
                sync |= IS_SYNC(inode);
        }

        aligned_dlen = ALIGN(dlen, 8);
        aligned_ilen = ALIGN(ilen, 8);

        len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
        /* Make sure to also account for extended attributes */
        if (ubifs_authenticated(c))
                len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c);
        else
                len += host_ui->data_len;

        dent = kzalloc(len, GFP_NOFS);
        if (!dent)
                return -ENOMEM;

        /* Make reservation before allocating sequence numbers */
        err = make_reservation(c, BASEHD, len);
        if (err)
                goto out_free;

        if (!xent) {
                dent->ch.node_type = UBIFS_DENT_NODE;
                if (fname_name(nm) == NULL)
                        dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
                else
                        dent_key_init(c, &dent_key, dir->i_ino, nm);
        } else {
                dent->ch.node_type = UBIFS_XENT_NODE;
                xent_key_init(c, &dent_key, dir->i_ino, nm);
        }

        key_write(c, &dent_key, dent->key);
        dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
        dent->type = get_dent_type(inode->i_mode);
        dent->nlen = cpu_to_le16(fname_len(nm));
        memcpy(dent->name, fname_name(nm), fname_len(nm));
        dent->name[fname_len(nm)] = '\0';
        set_dent_cookie(c, dent);

        zero_dent_node_unused(dent);
        ubifs_prep_grp_node(c, dent, dlen, 0);
        err = ubifs_node_calc_hash(c, dent, hash_dent);
        if (err)
                goto out_release;

        ino = (void *)dent + aligned_dlen;
        pack_inode(c, ino, inode, 0);
        err = ubifs_node_calc_hash(c, ino, hash_ino);
        if (err)
                goto out_release;

        ino = (void *)ino + aligned_ilen;
        pack_inode(c, ino, dir, 1);
        err = ubifs_node_calc_hash(c, ino, hash_ino_host);
        if (err)
                goto out_release;

        if (last_reference) {
                err = ubifs_add_orphan(c, inode->i_ino);
                if (err) {
                        release_head(c, BASEHD);
                        goto out_finish;
                }
                ui->del_cmtno = c->cmt_no;
                orphan_added = 1;
        }

        err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
        if (err)
                goto out_release;
        if (!sync) {
                struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

                ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
                ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
        }
        release_head(c, BASEHD);
        kfree(dent);
        ubifs_add_auth_dirt(c, lnum);

        if (deletion) {
                if (fname_name(nm) == NULL)
                        err = ubifs_tnc_remove_dh(c, &dent_key,
                                                  nm->minor_hash);
                else
                        err = ubifs_tnc_remove_nm(c, &dent_key, nm);
                if (err)
                        goto out_ro;
                err = ubifs_add_dirt(c, lnum, dlen);
        } else
                err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen,
                                       hash_dent, nm);
        if (err)
                goto out_ro;

        /*
         * Note, we do not remove the inode from TNC even if the last reference
         * to it has just been deleted, because the inode may still be opened.
         * Instead, the inode has been added to orphan lists and the orphan
         * subsystem will take further care about it.
         */
        ino_key_init(c, &ino_key, inode->i_ino);
        ino_offs = dent_offs + aligned_dlen;
        err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino);
        if (err)
                goto out_ro;

        ino_key_init(c, &ino_key, dir->i_ino);
        ino_offs += aligned_ilen;
        err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
                            UBIFS_INO_NODE_SZ + host_ui->data_len,
                            hash_ino_host);
        if (err)
                goto out_ro;

        finish_reservation(c);
        spin_lock(&ui->ui_lock);
        ui->synced_i_size = ui->ui_size;
        spin_unlock(&ui->ui_lock);
        if (xent) {
                spin_lock(&host_ui->ui_lock);
                host_ui->synced_i_size = host_ui->ui_size;
                spin_unlock(&host_ui->ui_lock);
        }
        mark_inode_clean(c, ui);
        mark_inode_clean(c, host_ui);
        return 0;

out_finish:
        finish_reservation(c);
out_free:
        kfree(dent);
        return err;

out_release:
        release_head(c, BASEHD);
        kfree(dent);
out_ro:
        ubifs_ro_mode(c, err);
        if (orphan_added)
                ubifs_delete_orphan(c, inode->i_ino);
        finish_reservation(c);
        return err;
}
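
/*
 * For reference, the group written by 'ubifs_jnl_update()' above is laid out
 * like this in the bud (a sketch; the authentication node is present only
 * when authentication is enabled):
 *
 *      | dent/xent |pad| inode of @inode |pad| inode of @dir/host | auth |
 *      ^ dent_offs     ^ + aligned_dlen      ^ + aligned_ilen
 *
 * and the three TNC updates above use exactly these offsets.
 */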

/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal. Returns %0 if the data
 * node was successfully written, and a negative error code in case of
 * failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
                         const union ubifs_key *key, const void *buf, int len)
{
        struct ubifs_data_node *data;
        int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
        int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
        int write_len;
        struct ubifs_inode *ui = ubifs_inode(inode);
        bool encrypted = IS_ENCRYPTED(inode);
        u8 hash[UBIFS_HASH_ARR_SZ];

        dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
                 (unsigned long)key_inum(c, key), key_block(c, key), len);
        ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);

        if (encrypted)
                dlen += UBIFS_CIPHER_BLOCK_SIZE;

        auth_len = ubifs_auth_node_sz(c);

        data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN);
        if (!data) {
                /*
                 * Fall-back to the write reserve buffer. Note, we might be
                 * currently on the memory reclaim path, when the kernel is
                 * trying to free some memory by writing out dirty pages. The
                 * write reserve buffer helps us to guarantee that we are
                 * always able to write the data.
                 */
                allocated = 0;
                mutex_lock(&c->write_reserve_mutex);
                data = c->write_reserve_buf;
        }

        data->ch.node_type = UBIFS_DATA_NODE;
        key_write(c, key, &data->key);
        data->size = cpu_to_le32(len);

        if (!(ui->flags & UBIFS_COMPR_FL))
                /* Compression is disabled for this inode */
                compr_type = UBIFS_COMPR_NONE;
        else
                compr_type = ui->compr_type;

        out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
        ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
        ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);

        if (encrypted) {
                err = ubifs_encrypt(inode, data, compr_len, &out_len,
                                    key_block(c, key));
                if (err)
                        goto out_free;

        } else {
                data->compr_size = 0;
                out_len = compr_len;
        }

        dlen = UBIFS_DATA_NODE_SZ + out_len;
        if (ubifs_authenticated(c))
                write_len = ALIGN(dlen, 8) + auth_len;
        else
                write_len = dlen;

        data->compr_type = cpu_to_le16(compr_type);

        /* Make reservation before allocating sequence numbers */
        err = make_reservation(c, DATAHD, write_len);
        if (err)
                goto out_free;

        ubifs_prepare_node(c, data, dlen, 0);
        err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
        if (err)
                goto out_release;

        err = ubifs_node_calc_hash(c, data, hash);
        if (err)
                goto out_release;

        ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
        release_head(c, DATAHD);

        ubifs_add_auth_dirt(c, lnum);

        err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
        if (err)
                goto out_ro;

        finish_reservation(c);
        if (!allocated)
                mutex_unlock(&c->write_reserve_mutex);
        else
                kfree(data);
        return 0;

out_release:
        release_head(c, DATAHD);
out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
out_free:
        if (!allocated)
                mutex_unlock(&c->write_reserve_mutex);
        else
                kfree(data);
        return err;
}

/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
        int err, lnum, offs;
        struct ubifs_ino_node *ino, *ino_start;
        struct ubifs_inode *ui = ubifs_inode(inode);
        int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ;
        int last_reference = !inode->i_nlink;
        int kill_xattrs = ui->xattr_cnt && last_reference;
        u8 hash[UBIFS_HASH_ARR_SZ];

        dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

        /*
         * If the inode is being deleted, do not write the attached data. No
         * need to synchronize the write-buffer either.
         */
        if (!last_reference) {
                ilen += ui->data_len;
                sync = IS_SYNC(inode);
        } else if (kill_xattrs) {
                write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt;
        }

        if (ubifs_authenticated(c))
                write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c);
        else
                write_len += ilen;

        ino_start = ino = kmalloc(write_len, GFP_NOFS);
        if (!ino)
                return -ENOMEM;

        /* Make reservation before allocating sequence numbers */
        err = make_reservation(c, BASEHD, write_len);
        if (err)
                goto out_free;

        if (kill_xattrs) {
                union ubifs_key key;
                struct fscrypt_name nm = {0};
                struct inode *xino;
                struct ubifs_dent_node *xent, *pxent = NULL;

                if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
                        err = -EPERM;
                        ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
                        goto out_release;
                }

                lowest_xent_key(c, &key, inode->i_ino);
                while (1) {
                        xent = ubifs_tnc_next_ent(c, &key, &nm);
                        if (IS_ERR(xent)) {
                                err = PTR_ERR(xent);
                                if (err == -ENOENT)
                                        break;

                                kfree(pxent);
                                goto out_release;
                        }

                        fname_name(&nm) = xent->name;
                        fname_len(&nm) = le16_to_cpu(xent->nlen);

                        xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
                        if (IS_ERR(xino)) {
                                err = PTR_ERR(xino);
                                ubifs_err(c, "dead directory entry '%s', error %d",
                                          xent->name, err);
                                ubifs_ro_mode(c, err);
                                kfree(pxent);
                                kfree(xent);
                                goto out_release;
                        }
                        ubifs_assert(c, ubifs_inode(xino)->xattr);

                        clear_nlink(xino);
                        pack_inode(c, ino, xino, 0);
                        ino = (void *)ino + UBIFS_INO_NODE_SZ;
                        iput(xino);

                        kfree(pxent);
                        pxent = xent;
                        key_read(c, &xent->key, &key);
                }
                kfree(pxent);
        }

        pack_inode(c, ino, inode, 1);
        err = ubifs_node_calc_hash(c, ino, hash);
        if (err)
                goto out_release;

        err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync);
        if (err)
                goto out_release;
        if (!sync)
                ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                          inode->i_ino);
        release_head(c, BASEHD);

        if (last_reference) {
                err = ubifs_tnc_remove_ino(c, inode->i_ino);
                if (err)
                        goto out_ro;
                ubifs_delete_orphan(c, inode->i_ino);
                err = ubifs_add_dirt(c, lnum, write_len);
        } else {
                union ubifs_key key;

                ubifs_add_auth_dirt(c, lnum);

                ino_key_init(c, &key, inode->i_ino);
                err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
        }
        if (err)
                goto out_ro;

        finish_reservation(c);
        spin_lock(&ui->ui_lock);
        ui->synced_i_size = ui->ui_size;
        spin_unlock(&ui->ui_lock);
        kfree(ino_start);
        return 0;

out_release:
        release_head(c, BASEHD);
out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
out_free:
        kfree(ino_start);
        return err;
}
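
/*
 * A note on the deletion path above (a summary of the code, not an extra
 * rule): when the last link is gone, the inode node written by
 * 'ubifs_jnl_write_inode()' is a "deletion inode" (nlink is 0, no data is
 * attached), preceded by one deletion inode for every extended attribute.
 * None of these nodes is added to the TNC - they only matter to journal
 * replay - so the whole group is immediately accounted as dirty space with
 * 'ubifs_add_dirt()'.
 */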

/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode which includes removing it from orphans,
 * deleting it from TNC and, in some cases, writing a deletion inode to the
 * journal.
 *
 * When regular file inodes are unlinked or a directory inode is removed, the
 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
 * direntry to the media, and adds the inode to orphans. After this, when the
 * last reference to this inode has been dropped, this function is called. In
 * general, it has to write one more deletion inode to the media, because if
 * a commit happened between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
 * anymore, and in fact it might not be on the flash anymore, because it might
 * have been garbage-collected already. And for optimization reasons UBIFS does
 * not read the orphan area if it has been unmounted cleanly, so it would have
 * no indication in the journal that there is a deleted inode which has to be
 * removed from TNC.
 *
 * However, if there was no commit between 'ubifs_jnl_update()' and
 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
 * inode to the media for the second time. And this is quite a typical case.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
        int err;
        struct ubifs_inode *ui = ubifs_inode(inode);

        ubifs_assert(c, inode->i_nlink == 0);

        if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no)
                /* A commit happened for sure or inode hosts xattrs */
                return ubifs_jnl_write_inode(c, inode);

        down_read(&c->commit_sem);
        /*
         * Check commit number again, because the first test has been done
         * without @c->commit_sem, so a commit might have happened.
         */
        if (ui->del_cmtno != c->cmt_no) {
                up_read(&c->commit_sem);
                return ubifs_jnl_write_inode(c, inode);
        }

        err = ubifs_tnc_remove_ino(c, inode->i_ino);
        if (err)
                ubifs_ro_mode(c, err);
        else
                ubifs_delete_orphan(c, inode->i_ino);
        up_read(&c->commit_sem);
        return err;
}
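
/*
 * The 'del_cmtno' check above, as a timeline (illustration only):
 *
 *      unlink/rmdir:  ubifs_jnl_update() writes the deletion dent + inode
 *                     and records ui->del_cmtno = c->cmt_no;
 *      (a commit may or may not run here)
 *      last iput():   ubifs_jnl_delete_inode():
 *                       ui->del_cmtno != c->cmt_no -> a commit ran, the old
 *                           deletion inode left the journal, so write a new
 *                           one via ubifs_jnl_write_inode();
 *                       ui->del_cmtno == c->cmt_no -> no commit in between,
 *                           just remove the inode from the TNC and orphans.
 */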

/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation which may involve
 * writing 2 inodes and 2 directory entries. It marks the written inodes as
 * clean and returns zero on success. In case of failure, a negative error
 * code is returned.
 */
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
                      const struct inode *fst_inode,
                      const struct fscrypt_name *fst_nm,
                      const struct inode *snd_dir,
                      const struct inode *snd_inode,
                      const struct fscrypt_name *snd_nm, int sync)
{
        union ubifs_key key;
        struct ubifs_dent_node *dent1, *dent2;
        int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
        int aligned_dlen1, aligned_dlen2;
        int twoparents = (fst_dir != snd_dir);
        void *p;
        u8 hash_dent1[UBIFS_HASH_ARR_SZ];
        u8 hash_dent2[UBIFS_HASH_ARR_SZ];
        u8 hash_p1[UBIFS_HASH_ARR_SZ];
        u8 hash_p2[UBIFS_HASH_ARR_SZ];

        ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
        ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
        ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
        ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

        dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
        dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
        aligned_dlen1 = ALIGN(dlen1, 8);
        aligned_dlen2 = ALIGN(dlen2, 8);

        len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
        if (twoparents)
                len += plen;

        len += ubifs_auth_node_sz(c);

        dent1 = kzalloc(len, GFP_NOFS);
        if (!dent1)
                return -ENOMEM;

        /* Make reservation before allocating sequence numbers */
        err = make_reservation(c, BASEHD, len);
        if (err)
                goto out_free;

        /* Make new dent for 1st entry */
        dent1->ch.node_type = UBIFS_DENT_NODE;
        dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
        dent1->inum = cpu_to_le64(fst_inode->i_ino);
        dent1->type = get_dent_type(fst_inode->i_mode);
        dent1->nlen = cpu_to_le16(fname_len(snd_nm));
        memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
        dent1->name[fname_len(snd_nm)] = '\0';
        set_dent_cookie(c, dent1);
        zero_dent_node_unused(dent1);
        ubifs_prep_grp_node(c, dent1, dlen1, 0);
        err = ubifs_node_calc_hash(c, dent1, hash_dent1);
        if (err)
                goto out_release;

        /* Make new dent for 2nd entry */
        dent2 = (void *)dent1 + aligned_dlen1;
        dent2->ch.node_type = UBIFS_DENT_NODE;
        dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
        dent2->inum = cpu_to_le64(snd_inode->i_ino);
        dent2->type = get_dent_type(snd_inode->i_mode);
        dent2->nlen = cpu_to_le16(fname_len(fst_nm));
        memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
        dent2->name[fname_len(fst_nm)] = '\0';
        set_dent_cookie(c, dent2);
        zero_dent_node_unused(dent2);
        ubifs_prep_grp_node(c, dent2, dlen2, 0);
        err = ubifs_node_calc_hash(c, dent2, hash_dent2);
        if (err)
                goto out_release;

        p = (void *)dent2 + aligned_dlen2;
        if (!twoparents) {
                pack_inode(c, p, fst_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_p1);
                if (err)
                        goto out_release;
        } else {
                pack_inode(c, p, fst_dir, 0);
                err = ubifs_node_calc_hash(c, p, hash_p1);
                if (err)
                        goto out_release;
                p += ALIGN(plen, 8);
                pack_inode(c, p, snd_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_p2);
                if (err)
                        goto out_release;
        }

        err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
        if (err)
                goto out_release;
        if (!sync) {
                struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

                ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
                ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
        }
        release_head(c, BASEHD);

        ubifs_add_auth_dirt(c, lnum);

        dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
        err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
        if (err)
                goto out_ro;

        offs += aligned_dlen1;
        dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
        err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
        if (err)
                goto out_ro;

        offs += aligned_dlen2;

        ino_key_init(c, &key, fst_dir->i_ino);
        err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
        if (err)
                goto out_ro;

        if (twoparents) {
                offs += ALIGN(plen, 8);
                ino_key_init(c, &key, snd_dir->i_ino);
                err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
                if (err)
                        goto out_ro;
        }

        finish_reservation(c);

        mark_inode_clean(c, ubifs_inode(fst_dir));
        if (twoparents)
                mark_inode_clean(c, ubifs_inode(snd_dir));
        kfree(dent1);
        return 0;

out_release:
        release_head(c, BASEHD);
out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
out_free:
        kfree(dent1);
        return err;
}
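
/*
 * Worth noting about 'ubifs_jnl_xrename()' above (descriptive only): when
 * both entries live in the same directory (fst_dir == snd_dir), only one
 * parent inode is packed and indexed. That is why @len grows by an extra
 * @plen and the second parent-inode TNC update is done only in the
 * 'twoparents' case.
 */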

/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of the directory entry to rename
 * @old_inode: directory entry's inode to rename
 * @old_nm: name of the old directory entry to rename
 * @new_dir: parent inode of the directory entry to rename
 * @new_inode: new directory entry's inode (or directory entry's inode to
 *             replace)
 * @new_nm: new name of the new directory entry
 * @whiteout: whiteout inode
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the rename operation which may involve writing up
 * to 4 inodes (new inode, whiteout inode, old and new parent directory
 * inodes) and 2 directory entries. It marks the written inodes as clean and
 * returns zero on success. In case of failure, a negative error code is
 * returned.
 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                     const struct inode *old_inode,
                     const struct fscrypt_name *old_nm,
                     const struct inode *new_dir,
                     const struct inode *new_inode,
                     const struct fscrypt_name *new_nm,
                     const struct inode *whiteout, int sync)
{
        void *p;
        union ubifs_key key;
        struct ubifs_dent_node *dent, *dent2;
        int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
        int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
        struct ubifs_inode *new_ui, *whiteout_ui;
        u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
        u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
        u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
        u8 hash_dent1[UBIFS_HASH_ARR_SZ];
        u8 hash_dent2[UBIFS_HASH_ARR_SZ];

        ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
        ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
        ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
        ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

        dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
        dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
        if (new_inode) {
                new_ui = ubifs_inode(new_inode);
                ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
                ilen = UBIFS_INO_NODE_SZ;
                if (!last_reference)
                        ilen += new_ui->data_len;
        } else
                ilen = 0;

        if (whiteout) {
                whiteout_ui = ubifs_inode(whiteout);
                ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
                ubifs_assert(c, whiteout->i_nlink == 1);
                ubifs_assert(c, !whiteout_ui->dirty);
                wlen = UBIFS_INO_NODE_SZ;
                wlen += whiteout_ui->data_len;
        } else
                wlen = 0;

        aligned_dlen1 = ALIGN(dlen1, 8);
        aligned_dlen2 = ALIGN(dlen2, 8);
        len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
              ALIGN(wlen, 8) + ALIGN(plen, 8);
        if (move)
                len += plen;

        len += ubifs_auth_node_sz(c);

        dent = kzalloc(len, GFP_NOFS);
        if (!dent)
                return -ENOMEM;

        /* Make reservation before allocating sequence numbers */
        err = make_reservation(c, BASEHD, len);
        if (err)
                goto out_free;

        /* Make new dent */
        dent->ch.node_type = UBIFS_DENT_NODE;
        dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
        dent->inum = cpu_to_le64(old_inode->i_ino);
        dent->type = get_dent_type(old_inode->i_mode);
        dent->nlen = cpu_to_le16(fname_len(new_nm));
        memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
        dent->name[fname_len(new_nm)] = '\0';
        set_dent_cookie(c, dent);
        zero_dent_node_unused(dent);
        ubifs_prep_grp_node(c, dent, dlen1, 0);
        err = ubifs_node_calc_hash(c, dent, hash_dent1);
        if (err)
                goto out_release;

        dent2 = (void *)dent + aligned_dlen1;
        dent2->ch.node_type = UBIFS_DENT_NODE;
        dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

        if (whiteout) {
                dent2->inum = cpu_to_le64(whiteout->i_ino);
                dent2->type = get_dent_type(whiteout->i_mode);
        } else {
                /* Make deletion dent */
                dent2->inum = 0;
                dent2->type = DT_UNKNOWN;
        }
        dent2->nlen = cpu_to_le16(fname_len(old_nm));
        memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
        dent2->name[fname_len(old_nm)] = '\0';
        set_dent_cookie(c, dent2);
        zero_dent_node_unused(dent2);
        ubifs_prep_grp_node(c, dent2, dlen2, 0);
        err = ubifs_node_calc_hash(c, dent2, hash_dent2);
        if (err)
                goto out_release;

        p = (void *)dent2 + aligned_dlen2;
        if (new_inode) {
                pack_inode(c, p, new_inode, 0);
                err = ubifs_node_calc_hash(c, p, hash_new_inode);
                if (err)
                        goto out_release;

                p += ALIGN(ilen, 8);
        }

        if (whiteout) {
                pack_inode(c, p, whiteout, 0);
                err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
                if (err)
                        goto out_release;

                p += ALIGN(wlen, 8);
        }

        if (!move) {
                pack_inode(c, p, old_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_old_dir);
                if (err)
                        goto out_release;
        } else {
                pack_inode(c, p, old_dir, 0);
                err = ubifs_node_calc_hash(c, p, hash_old_dir);
                if (err)
                        goto out_release;

                p += ALIGN(plen, 8);
                pack_inode(c, p, new_dir, 1);
                err = ubifs_node_calc_hash(c, p, hash_new_dir);
                if (err)
                        goto out_release;
        }

        if (last_reference) {
                err = ubifs_add_orphan(c, new_inode->i_ino);
                if (err) {
                        release_head(c, BASEHD);
                        goto out_finish;
                }
                new_ui->del_cmtno = c->cmt_no;
                orphan_added = 1;
        }

        err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
        if (err)
                goto out_release;
        if (!sync) {
                struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

                ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
                ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
                if (new_inode)
                        ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                                  new_inode->i_ino);
                if (whiteout)
                        ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
                                                  whiteout->i_ino);
        }
        release_head(c, BASEHD);

        ubifs_add_auth_dirt(c, lnum);

        dent_key_init(c, &key, new_dir->i_ino, new_nm);
        err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm);
        if (err)
                goto out_ro;

        offs += aligned_dlen1;
        if (whiteout) {
                dent_key_init(c, &key, old_dir->i_ino, old_nm);
                err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2,
                                       old_nm);
                if (err)
                        goto out_ro;
        } else {
                err = ubifs_add_dirt(c, lnum, dlen2);
                if (err)
                        goto out_ro;

                dent_key_init(c, &key, old_dir->i_ino, old_nm);
                err = ubifs_tnc_remove_nm(c, &key, old_nm);
                if (err)
                        goto out_ro;
        }

        offs += aligned_dlen2;
        if (new_inode) {
                ino_key_init(c, &key, new_inode->i_ino);
                err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode);
                if (err)
                        goto out_ro;
                offs += ALIGN(ilen, 8);
        }

        if (whiteout) {
                ino_key_init(c, &key, whiteout->i_ino);
                err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
                                    hash_whiteout_inode);
                if (err)
                        goto out_ro;
                offs += ALIGN(wlen, 8);
        }

        ino_key_init(c, &key, old_dir->i_ino);
        err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
        if (err)
                goto out_ro;

        if (move) {
                offs += ALIGN(plen, 8);
                ino_key_init(c, &key, new_dir->i_ino);
                err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir);
                if (err)
                        goto out_ro;
        }

        finish_reservation(c);
        if (new_inode) {
                mark_inode_clean(c, new_ui);
                spin_lock(&new_ui->ui_lock);
                new_ui->synced_i_size = new_ui->ui_size;
                spin_unlock(&new_ui->ui_lock);
        }
        /*
         * There is no need to mark the whiteout inode clean. The whiteout
         * does not have a non-zero size, so synced_i_size of whiteout_ui does
         * not need to be updated either.
         */
        mark_inode_clean(c, ubifs_inode(old_dir));
        if (move)
                mark_inode_clean(c, ubifs_inode(new_dir));
        kfree(dent);
        return 0;

out_release:
        release_head(c, BASEHD);
out_ro:
        ubifs_ro_mode(c, err);
        if (orphan_added)
                ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
        finish_reservation(c);
out_free:
        kfree(dent);
        return err;
}

/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode which refers to the data node
 * @block: data block number
 * @dn: data node to re-compress
 * @new_len: new length
 * @dn_size: size of the data node @dn in memory
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed/encrypted and re-written.
 */
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
                              unsigned int block, struct ubifs_data_node *dn,
                              int *new_len, int dn_size)
{
        void *buf;
        int err, dlen, compr_type, out_len, data_size;

        out_len = le32_to_cpu(dn->size);
        buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
        if (!buf)
                return -ENOMEM;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
        data_size = dn_size - UBIFS_DATA_NODE_SZ;
        compr_type = le16_to_cpu(dn->compr_type);

        if (IS_ENCRYPTED(inode)) {
                err = ubifs_decrypt(inode, dn, &dlen, block);
                if (err)
                        goto out;
        }

        if (compr_type == UBIFS_COMPR_NONE) {
                out_len = *new_len;
        } else {
                err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len,
                                       compr_type);
                if (err)
                        goto out;

                ubifs_compress(c, buf, *new_len, &dn->data, &out_len,
                               &compr_type);
        }

        if (IS_ENCRYPTED(inode)) {
                err = ubifs_encrypt(inode, dn, out_len, &data_size, block);
                if (err)
                        goto out;

                out_len = data_size;
        } else {
                dn->compr_size = 0;
        }

        ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
        dn->compr_type = cpu_to_le16(compr_type);
        dn->size = cpu_to_le32(*new_len);
        *new_len = UBIFS_DATA_NODE_SZ + out_len;
        err = 0;
out:
        kfree(buf);
        return err;
}
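
/*
 * In short, 'truncate_data_node()' above turns the last, partially kept data
 * node into a valid node for the new length (a restatement of the code, as a
 * reading aid): decrypt if needed, decompress unless the data was stored
 * uncompressed, re-compress only the first @new_len bytes, re-encrypt, and
 * finally fix up @compr_type, @size and the total node length returned via
 * @new_len.
 */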

/**
 * ubifs_jnl_truncate - update the journal for a truncation.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @old_size: old size
 * @new_size: new size
 *
 * When the size of a file decreases due to truncation, a truncation node is
 * written, the journal tree is updated, and the last data block is re-written
 * if it has been affected. The inode is also updated in order to synchronize
 * the new inode size.
 *
 * This function marks the inode as clean and returns zero on success. In case
 * of failure, a negative error code is returned.
 */
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
                       loff_t old_size, loff_t new_size)
{
        union ubifs_key key, to_key;
        struct ubifs_ino_node *ino;
        struct ubifs_trun_node *trun;
        struct ubifs_data_node *dn;
        int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
        int dn_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        ino_t inum = inode->i_ino;
        unsigned int blk;
        u8 hash_ino[UBIFS_HASH_ARR_SZ];
        u8 hash_dn[UBIFS_HASH_ARR_SZ];

        dbg_jnl("ino %lu, size %lld -> %lld",
                (unsigned long)inum, old_size, new_size);
        ubifs_assert(c, !ui->data_len);
        ubifs_assert(c, S_ISREG(inode->i_mode));
        ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));

        dn_size = COMPRESSED_DATA_NODE_BUF_SZ;

        if (IS_ENCRYPTED(inode))
                dn_size += UBIFS_CIPHER_BLOCK_SIZE;

        sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
             dn_size + ubifs_auth_node_sz(c);

        ino = kmalloc(sz, GFP_NOFS);
        if (!ino)
                return -ENOMEM;

        trun = (void *)ino + UBIFS_INO_NODE_SZ;
        trun->ch.node_type = UBIFS_TRUN_NODE;
        trun->inum = cpu_to_le32(inum);
        trun->old_size = cpu_to_le64(old_size);
        trun->new_size = cpu_to_le64(new_size);
        zero_trun_node_unused(trun);

        dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
        if (dlen) {
                /* Get last data block so it can be truncated */
                dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
                blk = new_size >> UBIFS_BLOCK_SHIFT;
                data_key_init(c, &key, inum, blk);
                dbg_jnlk(&key, "last block key ");
                err = ubifs_tnc_lookup(c, &key, dn);
                if (err == -ENOENT)
                        dlen = 0; /* Not found (so it is a hole) */
                else if (err)
                        goto out_free;
                else {
                        int dn_len = le32_to_cpu(dn->size);

                        if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
                                ubifs_err(c, "bad data node (block %u, inode %lu)",
                                          blk, inode->i_ino);
                                ubifs_dump_node(c, dn, dn_size);
                                err = -EUCLEAN;
                                goto out_free;
                        }

                        if (dn_len <= dlen)
                                dlen = 0; /* Nothing to do */
                        else {
                                err = truncate_data_node(c, inode, blk, dn,
                                                         &dlen, dn_size);
                                if (err)
                                        goto out_free;
                        }
                }
        }

        /* Must make reservation before allocating sequence numbers */
        len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;

        if (ubifs_authenticated(c))
                len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c);
        else
                len += dlen;

        err = make_reservation(c, BASEHD, len);
        if (err)
                goto out_free;

        pack_inode(c, ino, inode, 0);
        err = ubifs_node_calc_hash(c, ino, hash_ino);
        if (err)
                goto out_release;

        ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
        if (dlen) {
                ubifs_prep_grp_node(c, dn, dlen, 1);
                err = ubifs_node_calc_hash(c, dn, hash_dn);
                if (err)
                        goto out_release;
        }

        err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
        if (err)
                goto out_release;
        if (!sync)
                ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
        release_head(c, BASEHD);

        ubifs_add_auth_dirt(c, lnum);

        if (dlen) {
                sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
                err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
                if (err)
                        goto out_ro;
        }

        ino_key_init(c, &key, inum);
        err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
        if (err)
                goto out_ro;

        err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
        if (err)
                goto out_ro;

        bit = new_size & (UBIFS_BLOCK_SIZE - 1);
        blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
        data_key_init(c, &key, inum, blk);

        bit = old_size & (UBIFS_BLOCK_SIZE - 1);
        blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
        data_key_init(c, &to_key, inum, blk);

        err = ubifs_tnc_remove_range(c, &key, &to_key);
        if (err)
                goto out_ro;

        finish_reservation(c);
        spin_lock(&ui->ui_lock);
        ui->synced_i_size = ui->ui_size;
        spin_unlock(&ui->ui_lock);
        mark_inode_clean(c, ui);
        kfree(ino);
        return 0;

out_release:
        release_head(c, BASEHD);
out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
out_free:
        kfree(ino);
        return err;
}
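
/*
 * Worked example for the block-range removal above (UBIFS_BLOCK_SIZE is 4096
 * and UBIFS_BLOCK_SHIFT is 12; the numbers are only an illustration):
 *
 *      old_size = 12288, new_size = 5000
 *      first removed block: (5000 >> 12) + 1 = 2   (block 1 is kept, handled
 *                                                   by the re-written data
 *                                                   node)
 *      last removed block:  (12288 >> 12) - 1 = 2  (the old size ends exactly
 *                                                   on a block boundary)
 *
 * so 'ubifs_tnc_remove_range()' drops the data keys for blocks 2..2.
 */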
	 */
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	write_len = len + ubifs_auth_node_sz(c);

	xent = kzalloc(write_len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, write_len);
	if (err) {
		kfree(xent);
		return err;
	}

	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);
	err = ubifs_node_calc_hash(c, ino, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_release:
	kfree(xent);
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}

/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head). The host inode is written
 * after the extended attribute inode in order to guarantee that the extended
 * attribute will be flushed when the inode is synchronized by 'fsync()' and
 * consequently, the write-buffer is synchronized. This function returns zero
 * in case of success and a negative error code in case of failure.
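 *
 * Both inode nodes are written in one node group and, unless the operation is
 * synchronous, both inode numbers are added to the base head write-buffer
 * inode list, so that a later 'fsync()' against either inode causes the
 * write-buffer to be synchronized.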
 */
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);
	u8 hash_host[UBIFS_HASH_ARR_SZ];
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(c, inode->i_nlink > 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	aligned_len += ubifs_auth_node_sz(c);

	ino = kzalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	err = ubifs_node_calc_hash(c, ino, hash_host);
	if (err)
		goto out_release;
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);
	err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ubifs_add_auth_dirt(c, lnum);

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}