1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of UBIFS.
4 *
5 * Copyright (C) 2006-2008 Nokia Corporation.
6 *
7 * Authors: Artem Bityutskiy (Битюцкий Артём)
8 * Adrian Hunter
9 */
10
11 /*
12 * This file implements the UBIFS journal.
13 *
14 * The journal consists of 2 parts - the log and bud LEBs. The log has fixed
15 * length and position, while a bud logical eraseblock is any LEB in the main
16 * area. Buds contain file system data - data nodes, inode nodes, etc. The log
17 * contains only references to buds and some other stuff like commit
18 * start node. The idea is that when we commit the journal, we do
19 * not copy the data, the buds just become indexed. Since after the commit the
20 * nodes in bud eraseblocks become leaf nodes of the file system index tree, we
21 * use the term "bud". The analogy is obvious: bud eraseblocks contain nodes
22 * which will become leaves in the future.
23 *
24 * The journal is multi-headed because we want to write data to the journal as
25 * optimally as possible. It is nice to have nodes belonging to the same inode
26 * in one LEB, so we may write data owned by different inodes to different
27 * journal heads, although at present only one data head is used.
28 *
29 * For recovery reasons, the base head contains all inode nodes, all directory
30 * entry nodes and all truncate nodes. This means that the other heads contain
31 * only data nodes.
32 *
33 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
34 * time of commit, the bud is retained to continue to be used in the journal,
35 * even though the "front" of the LEB is now indexed. In that case, the log
36 * reference contains the offset where the bud starts for the purposes of the
37 * journal.
38 *
39 * The journal size has to be limited, because the larger the journal, the
40 * longer it takes to mount UBIFS (scanning the journal) and the more memory it
41 * takes (indexing in the TNC).
42 *
43 * All the journal write operations like 'ubifs_jnl_update()' here, which write
44 * multiple UBIFS nodes to the journal at one go, are atomic with respect to
45 * unclean reboots. Should an unclean reboot happen, the recovery code drops
46 * all the nodes.
47 */
48
49 #include "ubifs.h"
50
51 /**
52 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
53 * @ino: the inode to zero out
54 */
55 static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
56 {
57 memset(ino->padding1, 0, 4);
58 memset(ino->padding2, 0, 26);
59 }
60
61 /**
62 * zero_dent_node_unused - zero out unused fields of an on-flash directory
63 * entry node.
64 * @dent: the directory entry to zero out
65 */
66 static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
67 {
68 dent->padding1 = 0;
69 }
70
71 /**
72 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
73 * node.
74 * @trun: the truncation node to zero out
75 */
76 static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
77 {
78 memset(trun->padding, 0, 12);
79 }
80
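/**
 * ubifs_add_auth_dirt - account an authentication node as dirty space.
 * @c: UBIFS file-system description object
 * @lnum: LEB number the authentication node was written to
 *
 * If authentication is enabled, account the size of one authentication node
 * as dirty space in LEB @lnum. Authentication nodes are not referenced by the
 * index, so their space becomes dirty as soon as they are written.
 */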
81 static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
82 {
83 if (ubifs_authenticated(c))
84 ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
85 }
86
87 /**
88 * reserve_space - reserve space in the journal.
89 * @c: UBIFS file-system description object
90 * @jhead: journal head number
91 * @len: node length
92 *
93 * This function reserves space in journal head @jhead. If the reservation
94 * succeeded, the journal head stays locked and later has to be unlocked using
95 * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to
96 * be done, and other negative error codes in case of other failures.
97 */
98 static int reserve_space(struct ubifs_info *c, int jhead, int len)
99 {
100 int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
101 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
102
103 /*
104 * Typically, the base head has smaller nodes written to it, so it is
105 * better to try to allocate space at the ends of eraseblocks. This is
106 * what the squeeze parameter does.
107 */
108 ubifs_assert(c, !c->ro_media && !c->ro_mount);
109 squeeze = (jhead == BASEHD);
110 again:
111 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
112
113 if (c->ro_error) {
114 err = -EROFS;
115 goto out_unlock;
116 }
117
118 avail = c->leb_size - wbuf->offs - wbuf->used;
119 if (wbuf->lnum != -1 && avail >= len)
120 return 0;
121
122 /*
123 * Write buffer wasn't seek'ed or there is not enough space - look for an
124 * LEB with some empty space.
125 */
126 lnum = ubifs_find_free_space(c, len, &offs, squeeze);
127 if (lnum >= 0)
128 goto out;
129
130 err = lnum;
131 if (err != -ENOSPC)
132 goto out_unlock;
133
134 /*
135 * No free space, we have to run garbage collector to make
136 * some. But the write-buffer mutex has to be unlocked because
137 * GC also takes it.
138 */
139 dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
140 mutex_unlock(&wbuf->io_mutex);
141
142 lnum = ubifs_garbage_collect(c, 0);
143 if (lnum < 0) {
144 err = lnum;
145 if (err != -ENOSPC)
146 return err;
147
148 /*
149 * GC could not make a free LEB. But someone else may
150 * have allocated new bud for this journal head,
151 * because we dropped @wbuf->io_mutex, so try once
152 * again.
153 */
154 dbg_jnl("GC couldn't make a free LEB for jhead %s",
155 dbg_jhead(jhead));
156 if (retries++ < 2) {
157 dbg_jnl("retry (%d)", retries);
158 goto again;
159 }
160
161 dbg_jnl("return -ENOSPC");
162 return err;
163 }
164
165 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
166 dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
167 avail = c->leb_size - wbuf->offs - wbuf->used;
168
169 if (wbuf->lnum != -1 && avail >= len) {
170 /*
171 * Someone else has switched the journal head and we have
172 * enough space now. This happens when more than one process is
173 * trying to write to the same journal head at the same time.
174 */
175 dbg_jnl("return LEB %d back, already have LEB %d:%d",
176 lnum, wbuf->lnum, wbuf->offs + wbuf->used);
177 err = ubifs_return_leb(c, lnum);
178 if (err)
179 goto out_unlock;
180 return 0;
181 }
182
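/* The LEB produced by GC is empty, so the new bud starts at offset 0 */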
183 offs = 0;
184
185 out:
186 /*
187 * Make sure we synchronize the write-buffer before we add the new bud
188 * to the log. Otherwise we may have a power cut after the log
189 * reference node for the last bud (@lnum) is written but before the
190 * write-buffer data are written to the next-to-last bud
191 * (@wbuf->lnum). And the effect would be that the recovery would see
192 * that there is corruption in the next-to-last bud.
193 */
194 err = ubifs_wbuf_sync_nolock(wbuf);
195 if (err)
196 goto out_return;
197 err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
198 if (err)
199 goto out_return;
200 err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
201 if (err)
202 goto out_unlock;
203
204 return 0;
205
206 out_unlock:
207 mutex_unlock(&wbuf->io_mutex);
208 return err;
209
210 out_return:
211 /* An error occurred and the LEB has to be returned to lprops */
212 ubifs_assert(c, err < 0);
213 err1 = ubifs_return_leb(c, lnum);
214 if (err1 && err == -EAGAIN)
215 /*
216 * Return original error code only if it is not %-EAGAIN,
217 * which is not really an error. Otherwise, return the error
218 * code of 'ubifs_return_leb()'.
219 */
220 err = err1;
221 mutex_unlock(&wbuf->io_mutex);
222 return err;
223 }
224
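/**
 * ubifs_hash_nodes - hash a group of nodes and prepare an authentication node.
 * @c: UBIFS file-system description object
 * @node: buffer containing the nodes
 * @len: length of the buffer, including room for the authentication node
 * @hash: authentication hash to update
 *
 * This function updates @hash with every node in the buffer and then fills in
 * the authentication node which is expected at the very end of the buffer.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */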
225 static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
226 int len, struct shash_desc *hash)
227 {
228 int auth_node_size = ubifs_auth_node_sz(c);
229 int err;
230
231 while (1) {
232 const struct ubifs_ch *ch = node;
233 int nodelen = le32_to_cpu(ch->len);
234
235 ubifs_assert(c, len >= auth_node_size);
236
237 if (len == auth_node_size)
238 break;
239
240 ubifs_assert(c, len > nodelen);
241 ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));
242
243 err = ubifs_shash_update(c, hash, (void *)node, nodelen);
244 if (err)
245 return err;
246
247 node += ALIGN(nodelen, 8);
248 len -= ALIGN(nodelen, 8);
249 }
250
251 return ubifs_prepare_auth_node(c, node, hash);
252 }
253
254 /**
255 * write_head - write data to a journal head.
256 * @c: UBIFS file-system description object
257 * @jhead: journal head
258 * @buf: buffer to write
259 * @len: length to write
260 * @lnum: LEB number written is returned here
261 * @offs: offset written is returned here
262 * @sync: non-zero if the write-buffer has to be synchronized
263 *
264 * This function writes data to the reserved space of journal head @jhead.
265 * Returns zero in case of success and a negative error code in case of
266 * failure.
267 */
268 static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
269 int *lnum, int *offs, int sync)
270 {
271 int err;
272 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
273
274 ubifs_assert(c, jhead != GCHD);
275
276 *lnum = c->jheads[jhead].wbuf.lnum;
277 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
278 dbg_jnl("jhead %s, LEB %d:%d, len %d",
279 dbg_jhead(jhead), *lnum, *offs, len);
280
281 if (ubifs_authenticated(c)) {
282 err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
283 if (err)
284 return err;
285 }
286
287 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
288 if (err)
289 return err;
290 if (sync)
291 err = ubifs_wbuf_sync_nolock(wbuf);
292 return err;
293 }
294
295 /**
296 * __queue_and_wait - queue a task and wait until the task is woken up.
297 * @c: UBIFS file-system description object
298 *
299 * This function adds the current task to the queue and waits until the task
300 * is woken up. It should be called with @c->reserve_space_wq locked.
301 */
302 static void __queue_and_wait(struct ubifs_info *c)
303 {
304 DEFINE_WAIT(wait);
305
306 __add_wait_queue_entry_tail_exclusive(&c->reserve_space_wq, &wait);
307 set_current_state(TASK_UNINTERRUPTIBLE);
308 spin_unlock(&c->reserve_space_wq.lock);
309
310 schedule();
311 finish_wait(&c->reserve_space_wq, &wait);
312 }
313
314 /**
315 * wait_for_reservation - try queuing the current task to wait until woken up.
316 * @c: UBIFS file-system description object
317 *
318 * This function queues the current task to wait until it is woken up, if
319 * queuing has started (@c->need_wait_space is not %0). Returns %true if the
320 * current task was added to the queue, otherwise %false is returned.
321 */
322 static bool wait_for_reservation(struct ubifs_info *c)
323 {
324 if (likely(atomic_read(&c->need_wait_space) == 0))
325 /* Quick path to check whether queuing is started. */
326 return false;
327
328 spin_lock(&c->reserve_space_wq.lock);
329 if (atomic_read(&c->need_wait_space) == 0) {
330 /* Queuing is not started, don't queue current task. */
331 spin_unlock(&c->reserve_space_wq.lock);
332 return false;
333 }
334
335 __queue_and_wait(c);
336 return true;
337 }
338
339 /**
340 * wake_up_reservation - wake up the first task in the queue or stop queuing.
341 * @c: UBIFS file-system description object
342 *
343 * This function wakes up the first task in the queue if there is one, or
344 * stops queuing if the queue is empty.
345 */
346 static void wake_up_reservation(struct ubifs_info *c)
347 {
348 spin_lock(&c->reserve_space_wq.lock);
349 if (waitqueue_active(&c->reserve_space_wq))
350 wake_up_locked(&c->reserve_space_wq);
351 else
352 /*
353 * As in wait_for_reservation(), @c->need_wait_space is set under
354 * the protection of the wait queue lock, which avoids clearing
355 * @c->need_wait_space to 0 after a new task has already been queued.
356 */
357 atomic_set(&c->need_wait_space, 0);
358 spin_unlock(&c->reserve_space_wq.lock);
359 }
360
361 /**
362 * add_or_start_queue - add the current task to the queue or start queuing.
363 * @c: UBIFS file-system description object
364 *
365 * This function starts queuing if queuing has not started yet, otherwise it
366 * adds the current task to the queue.
367 */
368 static void add_or_start_queue(struct ubifs_info *c)
369 {
370 spin_lock(&c->reserve_space_wq.lock);
371 if (atomic_cmpxchg(&c->need_wait_space, 0, 1) == 0) {
372 /* This task has started queuing, it can go on directly. */
373 spin_unlock(&c->reserve_space_wq.lock);
374 return;
375 }
376
377 /*
378 * At least two tasks have retried more than 32 times at this
379 * point; the first task has started queuing, so just queue
380 * the remaining tasks.
381 */
382 __queue_and_wait(c);
383 }
384
385 /**
386 * make_reservation - reserve journal space.
387 * @c: UBIFS file-system description object
388 * @jhead: journal head
389 * @len: how many bytes to reserve
390 *
391 * This function makes space reservation in journal head @jhead. The function
392 * takes the commit lock and locks the journal head, and the caller has to
393 * unlock the head and finish the reservation with 'finish_reservation()'.
394 * Returns zero in case of success and a negative error code in case of
395 * failure.
396 *
397 * Note, the journal head may be unlocked as soon as the data is written, while
398 * the commit lock has to be released after the data has been added to the
399 * TNC.
400 */
401 static int make_reservation(struct ubifs_info *c, int jhead, int len)
402 {
403 int err, cmt_retries = 0, nospc_retries = 0;
404 bool blocked = wait_for_reservation(c);
405
406 again:
407 down_read(&c->commit_sem);
408 err = reserve_space(c, jhead, len);
409 if (!err) {
410 /* c->commit_sem will get released via finish_reservation(). */
411 goto out_wake_up;
412 }
413 up_read(&c->commit_sem);
414
415 if (err == -ENOSPC) {
416 /*
417 * GC could not make any progress. We should try to commit
418 * because it could make some dirty space and GC would make
419 * progress, so make the error -EAGAIN so that the below
420 * will commit and re-try.
421 */
422 nospc_retries++;
423 dbg_jnl("no space, retry");
424 err = -EAGAIN;
425 }
426
427 if (err != -EAGAIN)
428 goto out;
429
430 /*
431 * -EAGAIN means that the journal is full or too large, or the above
432 * code wants to do one commit. Do this and re-try.
433 */
434 if (cmt_retries > 128) {
435 /*
436 * This should not happen unless:
437 * 1. The journal size limitations are too tough.
438 * 2. The budgeting is incorrect. We always have to be able to
439 * write to the media, because all operations are budgeted.
440 * Deletions are not budgeted, though, but we reserve an
441 * extra LEB for them.
442 */
443 ubifs_err(c, "stuck in space allocation, nospc_retries %d",
444 nospc_retries);
445 err = -ENOSPC;
446 goto out;
447 } else if (cmt_retries > 32) {
448 /*
449 * It is almost impossible for this to happen, unless many tasks are
450 * making reservations concurrently and some task has retried
451 * GC + commit many times, while the space freed during this
452 * period is grabbed by other tasks.
453 * But if it happens, start queuing up all tasks that will make
454 * space reservations; then there is only one task making a space
455 * reservation at any time, and it will always succeed provided
456 * the budgeting is correct.
457 */
458 ubifs_warn(c, "too many space allocation cmt_retries (%d) "
459 "nospc_retries (%d), start queuing tasks",
460 cmt_retries, nospc_retries);
461
462 if (!blocked) {
463 blocked = true;
464 add_or_start_queue(c);
465 }
466 }
467
468 dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
469 cmt_retries);
470 cmt_retries += 1;
471
472 err = ubifs_run_commit(c);
473 if (err)
474 goto out_wake_up;
475 goto again;
476
477 out:
478 ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
479 len, jhead, err);
480 if (err == -ENOSPC) {
481 /* There are some budgeting problems, print useful information */
482 down_write(&c->commit_sem);
483 dump_stack();
484 ubifs_dump_budg(c, &c->bi);
485 ubifs_dump_lprops(c);
486 cmt_retries = dbg_check_lprops(c);
487 up_write(&c->commit_sem);
488 }
489 out_wake_up:
490 if (blocked) {
491 /*
492 * Only tasks that have ever started queuing or ever been queued
493 * can wake up other queued tasks, which makes sure that there is
494 * only one task woken up to make a space reservation.
495 * For example:
496 * task A task B task C
497 * make_reservation make_reservation
498 * reserve_space // 0
499 * wake_up_reservation
500 * atomic_cmpxchg // 0, start queuing
501 * reserve_space
502 * wait_for_reservation
503 * __queue_and_wait
504 * add_wait_queue
505 * if (blocked) // false
506 * // So that task C won't be woken up to race with task B
507 */
508 wake_up_reservation(c);
509 }
510 return err;
511 }
512
513 /**
514 * release_head - release a journal head.
515 * @c: UBIFS file-system description object
516 * @jhead: journal head
517 *
518 * This function releases journal head @jhead which was locked by
519 * the 'make_reservation()' function. It has to be called after each successful
520 * 'make_reservation()' invocation.
521 */
522 static inline void release_head(struct ubifs_info *c, int jhead)
523 {
524 mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
525 }
526
527 /**
528 * finish_reservation - finish a reservation.
529 * @c: UBIFS file-system description object
530 *
531 * This function finishes journal space reservation. It must be called after
532 * 'make_reservation()'.
533 */
534 static void finish_reservation(struct ubifs_info *c)
535 {
536 up_read(&c->commit_sem);
537 }
538
539 /**
540 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
541 * @mode: inode mode
542 */
543 static int get_dent_type(int mode)
544 {
545 switch (mode & S_IFMT) {
546 case S_IFREG:
547 return UBIFS_ITYPE_REG;
548 case S_IFDIR:
549 return UBIFS_ITYPE_DIR;
550 case S_IFLNK:
551 return UBIFS_ITYPE_LNK;
552 case S_IFBLK:
553 return UBIFS_ITYPE_BLK;
554 case S_IFCHR:
555 return UBIFS_ITYPE_CHR;
556 case S_IFIFO:
557 return UBIFS_ITYPE_FIFO;
558 case S_IFSOCK:
559 return UBIFS_ITYPE_SOCK;
560 default:
561 BUG();
562 }
563 return 0;
564 }
565
566 /**
567 * pack_inode - pack an inode node.
568 * @c: UBIFS file-system description object
569 * @ino: buffer in which to pack inode node
570 * @inode: inode to pack
571 * @last: indicates the last node of the group
572 */
573 static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
574 const struct inode *inode, int last)
575 {
576 int data_len = 0, last_reference = !inode->i_nlink;
577 struct ubifs_inode *ui = ubifs_inode(inode);
578
579 ino->ch.node_type = UBIFS_INO_NODE;
580 ino_key_init_flash(c, &ino->key, inode->i_ino);
581 ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
582 ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode));
583 ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
584 ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode));
585 ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
586 ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode));
587 ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
588 ino->uid = cpu_to_le32(i_uid_read(inode));
589 ino->gid = cpu_to_le32(i_gid_read(inode));
590 ino->mode = cpu_to_le32(inode->i_mode);
591 ino->flags = cpu_to_le32(ui->flags);
592 ino->size = cpu_to_le64(ui->ui_size);
593 ino->nlink = cpu_to_le32(inode->i_nlink);
594 ino->compr_type = cpu_to_le16(ui->compr_type);
595 ino->data_len = cpu_to_le32(ui->data_len);
596 ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
597 ino->xattr_size = cpu_to_le32(ui->xattr_size);
598 ino->xattr_names = cpu_to_le32(ui->xattr_names);
599 zero_ino_node_unused(ino);
600
601 /*
602 * Drop the attached data if this is a deletion inode, the data is not
603 * needed anymore.
604 */
605 if (!last_reference) {
606 memcpy(ino->data, ui->data, ui->data_len);
607 data_len = ui->data_len;
608 }
609
610 ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
611 }
612
613 /**
614 * mark_inode_clean - mark UBIFS inode as clean.
615 * @c: UBIFS file-system description object
616 * @ui: UBIFS inode to mark as clean
617 *
618 * This helper function marks UBIFS inode @ui as clean by cleaning the
619 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
620 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
621 * just do nothing.
622 */
623 static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
624 {
625 if (ui->dirty)
626 ubifs_release_dirty_inode_budget(c, ui);
627 ui->dirty = 0;
628 }
629
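/**
 * set_dent_cookie - fill the cookie field of a directory entry node.
 * @c: UBIFS file-system description object
 * @dent: directory entry node
 *
 * If the double hash feature is enabled, a random cookie is stored in the
 * directory entry so that entries with colliding name hashes can be told
 * apart; otherwise the cookie is simply set to zero.
 */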
630 static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
631 {
632 if (c->double_hash)
633 dent->cookie = (__force __le32) get_random_u32();
634 else
635 dent->cookie = 0;
636 }
637
638 /**
639 * ubifs_jnl_update - update inode.
640 * @c: UBIFS file-system description object
641 * @dir: parent inode or host inode in case of extended attributes
642 * @nm: directory entry name
643 * @inode: inode to update
644 * @deletion: indicates a directory entry deletion, i.e. unlink or rmdir
645 * @xent: non-zero if the directory entry is an extended attribute entry
646 * @in_orphan: indicates whether the @inode is in orphan list
647 *
648 * This function updates an inode by writing a directory entry (or extended
649 * attribute entry), the inode itself, and the parent directory inode (or the
650 * host inode) to the journal.
651 *
652 * The function writes the host inode @dir last, which is important in case of
653 * extended attributes. Indeed, then we guarantee that if the host inode gets
654 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
655 * the extended attribute inode gets flushed too. And this is exactly what the
656 * user expects - synchronizing the host inode synchronizes its extended
657 * attributes. Similarly, this guarantees that if @dir is synchronized, its
658 * directory entry corresponding to @nm gets synchronized too.
659 *
660 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
661 * function synchronizes the write-buffer.
662 *
663 * This function marks the @dir and @inode inodes as clean and returns zero on
664 * success. In case of failure, a negative error code is returned.
665 */
666 int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
667 const struct fscrypt_name *nm, const struct inode *inode,
668 int deletion, int xent, int in_orphan)
669 {
670 int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
671 int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
672 int last_reference = !!(deletion && inode->i_nlink == 0);
673 struct ubifs_inode *ui = ubifs_inode(inode);
674 struct ubifs_inode *host_ui = ubifs_inode(dir);
675 struct ubifs_dent_node *dent;
676 struct ubifs_ino_node *ino;
677 union ubifs_key dent_key, ino_key;
678 u8 hash_dent[UBIFS_HASH_ARR_SZ];
679 u8 hash_ino[UBIFS_HASH_ARR_SZ];
680 u8 hash_ino_host[UBIFS_HASH_ARR_SZ];
681
682 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
683
684 dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
685 ilen = UBIFS_INO_NODE_SZ;
686
687 /*
688 * If the last reference to the inode is being deleted, then there is
689 * no need to attach and write inode data; it is being deleted anyway.
690 * And if the inode is being deleted, there is no need to synchronize the
691 * write-buffer even if the inode is synchronous.
692 */
693 if (!last_reference) {
694 ilen += ui->data_len;
695 sync |= IS_SYNC(inode);
696 }
697
698 aligned_dlen = ALIGN(dlen, 8);
699 aligned_ilen = ALIGN(ilen, 8);
700
701 len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
702 /* Make sure to also account for extended attributes */
703 if (ubifs_authenticated(c))
704 len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c);
705 else
706 len += host_ui->data_len;
707
708 dent = kzalloc(len, GFP_NOFS);
709 if (!dent)
710 return -ENOMEM;
711
712 /* Make reservation before allocating sequence numbers */
713 err = make_reservation(c, BASEHD, len);
714 if (err)
715 goto out_free;
716
717 if (!xent) {
718 dent->ch.node_type = UBIFS_DENT_NODE;
719 if (fname_name(nm) == NULL)
720 dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
721 else
722 dent_key_init(c, &dent_key, dir->i_ino, nm);
723 } else {
724 dent->ch.node_type = UBIFS_XENT_NODE;
725 xent_key_init(c, &dent_key, dir->i_ino, nm);
726 }
727
728 key_write(c, &dent_key, dent->key);
729 dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
730 dent->type = get_dent_type(inode->i_mode);
731 dent->nlen = cpu_to_le16(fname_len(nm));
732 memcpy(dent->name, fname_name(nm), fname_len(nm));
733 dent->name[fname_len(nm)] = '\0';
734 set_dent_cookie(c, dent);
735
736 zero_dent_node_unused(dent);
737 ubifs_prep_grp_node(c, dent, dlen, 0);
738 err = ubifs_node_calc_hash(c, dent, hash_dent);
739 if (err)
740 goto out_release;
741
742 ino = (void *)dent + aligned_dlen;
743 pack_inode(c, ino, inode, 0);
744 err = ubifs_node_calc_hash(c, ino, hash_ino);
745 if (err)
746 goto out_release;
747
748 ino = (void *)ino + aligned_ilen;
749 pack_inode(c, ino, dir, 1);
750 err = ubifs_node_calc_hash(c, ino, hash_ino_host);
751 if (err)
752 goto out_release;
753
754 if (last_reference && !in_orphan) {
755 err = ubifs_add_orphan(c, inode->i_ino);
756 if (err) {
757 release_head(c, BASEHD);
758 goto out_finish;
759 }
760 ui->del_cmtno = c->cmt_no;
761 orphan_added = 1;
762 }
763
764 err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
765 if (err)
766 goto out_release;
767 if (!sync) {
768 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
769
770 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
771 ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
772 }
773 release_head(c, BASEHD);
774 kfree(dent);
775 ubifs_add_auth_dirt(c, lnum);
776
777 if (deletion) {
778 if (fname_name(nm) == NULL)
779 err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
780 else
781 err = ubifs_tnc_remove_nm(c, &dent_key, nm);
782 if (err)
783 goto out_ro;
784 err = ubifs_add_dirt(c, lnum, dlen);
785 } else
786 err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen,
787 hash_dent, nm);
788 if (err)
789 goto out_ro;
790
791 /*
792 * Note, we do not remove the inode from TNC even if the last reference
793 * to it has just been deleted, because the inode may still be opened.
794 * Instead, the inode has been added to orphan lists and the orphan
795 * subsystem will take further care about it.
796 */
797 ino_key_init(c, &ino_key, inode->i_ino);
798 ino_offs = dent_offs + aligned_dlen;
799 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino);
800 if (err)
801 goto out_ro;
802
803 ino_key_init(c, &ino_key, dir->i_ino);
804 ino_offs += aligned_ilen;
805 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
806 UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host);
807 if (err)
808 goto out_ro;
809
810 if (in_orphan && inode->i_nlink)
811 ubifs_delete_orphan(c, inode->i_ino);
812
813 finish_reservation(c);
814 spin_lock(&ui->ui_lock);
815 ui->synced_i_size = ui->ui_size;
816 spin_unlock(&ui->ui_lock);
817 if (xent) {
818 spin_lock(&host_ui->ui_lock);
819 host_ui->synced_i_size = host_ui->ui_size;
820 spin_unlock(&host_ui->ui_lock);
821 }
822 mark_inode_clean(c, ui);
823 mark_inode_clean(c, host_ui);
824 return 0;
825
826 out_finish:
827 finish_reservation(c);
828 out_free:
829 kfree(dent);
830 return err;
831
832 out_release:
833 release_head(c, BASEHD);
834 kfree(dent);
835 out_ro:
836 ubifs_ro_mode(c, err);
837 if (orphan_added)
838 ubifs_delete_orphan(c, inode->i_ino);
839 finish_reservation(c);
840 return err;
841 }
842
843 /**
844 * ubifs_jnl_write_data - write a data node to the journal.
845 * @c: UBIFS file-system description object
846 * @inode: inode the data node belongs to
847 * @key: node key
848 * @buf: buffer to write
849 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
850 *
851 * This function writes a data node to the journal. Returns %0 if the data node
852 * was successfully written, and a negative error code in case of failure.
853 */
854 int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
855 const union ubifs_key *key, const void *buf, int len)
856 {
857 struct ubifs_data_node *data;
858 int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
859 int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
860 int write_len;
861 struct ubifs_inode *ui = ubifs_inode(inode);
862 bool encrypted = IS_ENCRYPTED(inode);
863 u8 hash[UBIFS_HASH_ARR_SZ];
864
865 dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
866 (unsigned long)key_inum(c, key), key_block(c, key), len);
867 ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);
868
869 if (encrypted)
870 dlen += UBIFS_CIPHER_BLOCK_SIZE;
871
872 auth_len = ubifs_auth_node_sz(c);
873
874 data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN);
875 if (!data) {
876 /*
877 * Fall-back to the write reserve buffer. Note, we might be
878 * currently on the memory reclaim path, when the kernel is
879 * trying to free some memory by writing out dirty pages. The
880 * write reserve buffer helps us to guarantee that we are
881 * always able to write the data.
882 */
883 allocated = 0;
884 mutex_lock(&c->write_reserve_mutex);
885 data = c->write_reserve_buf;
886 }
887
888 data->ch.node_type = UBIFS_DATA_NODE;
889 key_write(c, key, &data->key);
890 data->size = cpu_to_le32(len);
891
892 if (!(ui->flags & UBIFS_COMPR_FL))
893 /* Compression is disabled for this inode */
894 compr_type = UBIFS_COMPR_NONE;
895 else
896 compr_type = ui->compr_type;
897
898 out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
899 ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
900 ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);
901
902 if (encrypted) {
903 err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
904 if (err)
905 goto out_free;
906
907 } else {
908 data->compr_size = 0;
909 out_len = compr_len;
910 }
911
912 dlen = UBIFS_DATA_NODE_SZ + out_len;
913 if (ubifs_authenticated(c))
914 write_len = ALIGN(dlen, 8) + auth_len;
915 else
916 write_len = dlen;
917
918 data->compr_type = cpu_to_le16(compr_type);
919
920 /* Make reservation before allocating sequence numbers */
921 err = make_reservation(c, DATAHD, write_len);
922 if (err)
923 goto out_free;
924
925 ubifs_prepare_node(c, data, dlen, 0);
926 err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
927 if (err)
928 goto out_release;
929
930 err = ubifs_node_calc_hash(c, data, hash);
931 if (err)
932 goto out_release;
933
934 ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
935 release_head(c, DATAHD);
936
937 ubifs_add_auth_dirt(c, lnum);
938
939 err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
940 if (err)
941 goto out_ro;
942
943 finish_reservation(c);
944 if (!allocated)
945 mutex_unlock(&c->write_reserve_mutex);
946 else
947 kfree(data);
948 return 0;
949
950 out_release:
951 release_head(c, DATAHD);
952 out_ro:
953 ubifs_ro_mode(c, err);
954 finish_reservation(c);
955 out_free:
956 if (!allocated)
957 mutex_unlock(&c->write_reserve_mutex);
958 else
959 kfree(data);
960 return err;
961 }
962
963 /**
964 * ubifs_jnl_write_inode - flush inode to the journal.
965 * @c: UBIFS file-system description object
966 * @inode: inode to flush
967 *
968 * This function writes inode @inode to the journal. If the inode is
969 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
970 * success and a negative error code in case of failure.
971 */
972 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
973 {
974 int err, lnum, offs;
975 struct ubifs_ino_node *ino, *ino_start;
976 struct ubifs_inode *ui = ubifs_inode(inode);
977 int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ;
978 int last_reference = !inode->i_nlink;
979 int kill_xattrs = ui->xattr_cnt && last_reference;
980 u8 hash[UBIFS_HASH_ARR_SZ];
981
982 dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
983
984 /*
985 * If the inode is being deleted, do not write the attached data. No
986 * need to synchronize the write-buffer either.
987 */
988 if (!last_reference) {
989 ilen += ui->data_len;
990 sync = IS_SYNC(inode);
991 } else if (kill_xattrs) {
992 write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt;
993 }
994
995 if (ubifs_authenticated(c))
996 write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c);
997 else
998 write_len += ilen;
999
1000 ino_start = ino = kmalloc(write_len, GFP_NOFS);
1001 if (!ino)
1002 return -ENOMEM;
1003
1004 /* Make reservation before allocating sequence numbers */
1005 err = make_reservation(c, BASEHD, write_len);
1006 if (err)
1007 goto out_free;
1008
1009 if (kill_xattrs) {
1010 union ubifs_key key;
1011 struct fscrypt_name nm = {0};
1012 struct inode *xino;
1013 struct ubifs_dent_node *xent, *pxent = NULL;
1014
1015 if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
1016 err = -EPERM;
1017 ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
1018 goto out_release;
1019 }
1020
1021 lowest_xent_key(c, &key, inode->i_ino);
1022 while (1) {
1023 xent = ubifs_tnc_next_ent(c, &key, &nm);
1024 if (IS_ERR(xent)) {
1025 err = PTR_ERR(xent);
1026 if (err == -ENOENT)
1027 break;
1028
1029 kfree(pxent);
1030 goto out_release;
1031 }
1032
1033 fname_name(&nm) = xent->name;
1034 fname_len(&nm) = le16_to_cpu(xent->nlen);
1035
1036 xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
1037 if (IS_ERR(xino)) {
1038 err = PTR_ERR(xino);
1039 ubifs_err(c, "dead directory entry '%s', error %d",
1040 xent->name, err);
1041 ubifs_ro_mode(c, err);
1042 kfree(pxent);
1043 kfree(xent);
1044 goto out_release;
1045 }
1046 ubifs_assert(c, ubifs_inode(xino)->xattr);
1047
1048 clear_nlink(xino);
1049 pack_inode(c, ino, xino, 0);
1050 ino = (void *)ino + UBIFS_INO_NODE_SZ;
1051 iput(xino);
1052
1053 kfree(pxent);
1054 pxent = xent;
1055 key_read(c, &xent->key, &key);
1056 }
1057 kfree(pxent);
1058 }
1059
1060 pack_inode(c, ino, inode, 1);
1061 err = ubifs_node_calc_hash(c, ino, hash);
1062 if (err)
1063 goto out_release;
1064
1065 err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync);
1066 if (err)
1067 goto out_release;
1068 if (!sync)
1069 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1070 inode->i_ino);
1071 release_head(c, BASEHD);
1072
1073 if (last_reference) {
1074 err = ubifs_tnc_remove_ino(c, inode->i_ino);
1075 if (err)
1076 goto out_ro;
1077 ubifs_delete_orphan(c, inode->i_ino);
1078 err = ubifs_add_dirt(c, lnum, write_len);
1079 } else {
1080 union ubifs_key key;
1081
1082 ubifs_add_auth_dirt(c, lnum);
1083
1084 ino_key_init(c, &key, inode->i_ino);
1085 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
1086 }
1087 if (err)
1088 goto out_ro;
1089
1090 finish_reservation(c);
1091 spin_lock(&ui->ui_lock);
1092 ui->synced_i_size = ui->ui_size;
1093 spin_unlock(&ui->ui_lock);
1094 kfree(ino_start);
1095 return 0;
1096
1097 out_release:
1098 release_head(c, BASEHD);
1099 out_ro:
1100 ubifs_ro_mode(c, err);
1101 finish_reservation(c);
1102 out_free:
1103 kfree(ino_start);
1104 return err;
1105 }
1106
1107 /**
1108 * ubifs_jnl_delete_inode - delete an inode.
1109 * @c: UBIFS file-system description object
1110 * @inode: inode to delete
1111 *
1112 * This function deletes inode @inode which includes removing it from orphans,
1113 * deleting it from TNC and, in some cases, writing a deletion inode to the
1114 * journal.
1115 *
1116 * When regular file inodes are unlinked or a directory inode is removed, the
1117 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
1118 * direntry to the media, and adds the inode to orphans. After this, when the
1119 * last reference to this inode has been dropped, this function is called. In
1120 * general, it has to write one more deletion inode to the media, because if
1121 * a commit happened between 'ubifs_jnl_update()' and
1122 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
1123 * anymore, and in fact it might not be on the flash anymore, because it might
1124 * have been garbage-collected already. And for optimization reasons UBIFS does
1125 * not read the orphan area if it has been unmounted cleanly, so it would have
1126 * no indication in the journal that there is a deleted inode which has to be
1127 * removed from TNC.
1128 *
1129 * However, if there was no commit between 'ubifs_jnl_update()' and
1130 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
1131 * inode to the media for the second time. And this is quite a typical case.
1132 *
1133 * This function returns zero in case of success and a negative error code in
1134 * case of failure.
1135 */
1136 int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
1137 {
1138 int err;
1139 struct ubifs_inode *ui = ubifs_inode(inode);
1140
1141 ubifs_assert(c, inode->i_nlink == 0);
1142
1143 if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no)
1144 /* A commit happened for sure or inode hosts xattrs */
1145 return ubifs_jnl_write_inode(c, inode);
1146
1147 down_read(&c->commit_sem);
1148 /*
1149 * Check commit number again, because the first test has been done
1150 * without @c->commit_sem, so a commit might have happened.
1151 */
1152 if (ui->del_cmtno != c->cmt_no) {
1153 up_read(&c->commit_sem);
1154 return ubifs_jnl_write_inode(c, inode);
1155 }
1156
1157 err = ubifs_tnc_remove_ino(c, inode->i_ino);
1158 if (err)
1159 ubifs_ro_mode(c, err);
1160 else
1161 ubifs_delete_orphan(c, inode->i_ino);
1162 up_read(&c->commit_sem);
1163 return err;
1164 }
1165
1166 /**
1167 * ubifs_jnl_xrename - cross rename two directory entries.
1168 * @c: UBIFS file-system description object
1169 * @fst_dir: parent inode of 1st directory entry to exchange
1170 * @fst_inode: 1st inode to exchange
1171 * @fst_nm: name of 1st inode to exchange
1172 * @snd_dir: parent inode of 2nd directory entry to exchange
1173 * @snd_inode: 2nd inode to exchange
1174 * @snd_nm: name of 2nd inode to exchange
1175 * @sync: non-zero if the write-buffer has to be synchronized
1176 *
1177 * This function implements the cross rename operation which may involve
1178 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean
1179 * and returns zero on success. In case of failure, a negative error code is
1180 * returned.
1181 */
1182 int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
1183 const struct inode *fst_inode,
1184 const struct fscrypt_name *fst_nm,
1185 const struct inode *snd_dir,
1186 const struct inode *snd_inode,
1187 const struct fscrypt_name *snd_nm, int sync)
1188 {
1189 union ubifs_key key;
1190 struct ubifs_dent_node *dent1, *dent2;
1191 int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
1192 int aligned_dlen1, aligned_dlen2;
1193 int twoparents = (fst_dir != snd_dir);
1194 void *p;
1195 u8 hash_dent1[UBIFS_HASH_ARR_SZ];
1196 u8 hash_dent2[UBIFS_HASH_ARR_SZ];
1197 u8 hash_p1[UBIFS_HASH_ARR_SZ];
1198 u8 hash_p2[UBIFS_HASH_ARR_SZ];
1199
1200 ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
1201 ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
1202 ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
1203 ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
1204
1205 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
1206 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
1207 aligned_dlen1 = ALIGN(dlen1, 8);
1208 aligned_dlen2 = ALIGN(dlen2, 8);
1209
1210 len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
1211 if (twoparents)
1212 len += plen;
1213
1214 len += ubifs_auth_node_sz(c);
1215
1216 dent1 = kzalloc(len, GFP_NOFS);
1217 if (!dent1)
1218 return -ENOMEM;
1219
1220 /* Make reservation before allocating sequence numbers */
1221 err = make_reservation(c, BASEHD, len);
1222 if (err)
1223 goto out_free;
1224
1225 /* Make new dent for 1st entry */
1226 dent1->ch.node_type = UBIFS_DENT_NODE;
1227 dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
1228 dent1->inum = cpu_to_le64(fst_inode->i_ino);
1229 dent1->type = get_dent_type(fst_inode->i_mode);
1230 dent1->nlen = cpu_to_le16(fname_len(snd_nm));
1231 memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
1232 dent1->name[fname_len(snd_nm)] = '\0';
1233 set_dent_cookie(c, dent1);
1234 zero_dent_node_unused(dent1);
1235 ubifs_prep_grp_node(c, dent1, dlen1, 0);
1236 err = ubifs_node_calc_hash(c, dent1, hash_dent1);
1237 if (err)
1238 goto out_release;
1239
1240 /* Make new dent for 2nd entry */
1241 dent2 = (void *)dent1 + aligned_dlen1;
1242 dent2->ch.node_type = UBIFS_DENT_NODE;
1243 dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
1244 dent2->inum = cpu_to_le64(snd_inode->i_ino);
1245 dent2->type = get_dent_type(snd_inode->i_mode);
1246 dent2->nlen = cpu_to_le16(fname_len(fst_nm));
1247 memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
1248 dent2->name[fname_len(fst_nm)] = '\0';
1249 set_dent_cookie(c, dent2);
1250 zero_dent_node_unused(dent2);
1251 ubifs_prep_grp_node(c, dent2, dlen2, 0);
1252 err = ubifs_node_calc_hash(c, dent2, hash_dent2);
1253 if (err)
1254 goto out_release;
1255
1256 p = (void *)dent2 + aligned_dlen2;
1257 if (!twoparents) {
1258 pack_inode(c, p, fst_dir, 1);
1259 err = ubifs_node_calc_hash(c, p, hash_p1);
1260 if (err)
1261 goto out_release;
1262 } else {
1263 pack_inode(c, p, fst_dir, 0);
1264 err = ubifs_node_calc_hash(c, p, hash_p1);
1265 if (err)
1266 goto out_release;
1267 p += ALIGN(plen, 8);
1268 pack_inode(c, p, snd_dir, 1);
1269 err = ubifs_node_calc_hash(c, p, hash_p2);
1270 if (err)
1271 goto out_release;
1272 }
1273
1274 err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
1275 if (err)
1276 goto out_release;
1277 if (!sync) {
1278 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1279
1280 ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
1281 ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
1282 }
1283 release_head(c, BASEHD);
1284
1285 ubifs_add_auth_dirt(c, lnum);
1286
1287 dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
1288 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
1289 if (err)
1290 goto out_ro;
1291
1292 offs += aligned_dlen1;
1293 dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
1294 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
1295 if (err)
1296 goto out_ro;
1297
1298 offs += aligned_dlen2;
1299
1300 ino_key_init(c, &key, fst_dir->i_ino);
1301 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
1302 if (err)
1303 goto out_ro;
1304
1305 if (twoparents) {
1306 offs += ALIGN(plen, 8);
1307 ino_key_init(c, &key, snd_dir->i_ino);
1308 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
1309 if (err)
1310 goto out_ro;
1311 }
1312
1313 finish_reservation(c);
1314
1315 mark_inode_clean(c, ubifs_inode(fst_dir));
1316 if (twoparents)
1317 mark_inode_clean(c, ubifs_inode(snd_dir));
1318 kfree(dent1);
1319 return 0;
1320
1321 out_release:
1322 release_head(c, BASEHD);
1323 out_ro:
1324 ubifs_ro_mode(c, err);
1325 finish_reservation(c);
1326 out_free:
1327 kfree(dent1);
1328 return err;
1329 }
1330
1331 /**
1332 * ubifs_jnl_rename - rename a directory entry.
1333 * @c: UBIFS file-system description object
1334 * @old_dir: parent inode of directory entry to rename
1335 * @old_inode: directory entry's inode to rename
1336 * @old_nm: name of the old directory entry to rename
1337 * @new_dir: parent inode of directory entry to rename
1338 * @new_inode: new directory entry's inode (or directory entry's inode to
1339 * replace)
1340 * @new_nm: new name of the new directory entry
1341 * @whiteout: whiteout inode
1342 * @sync: non-zero if the write-buffer has to be synchronized
1343 * @delete_orphan: indicates an orphan entry deletion for @whiteout
1344 *
1345 * This function implements the rename operation which may involve writing up
1346 * to 4 inodes (new inode, whiteout inode, old and new parent directory inodes)
1347 * and 2 directory entries. It marks the written inodes as clean and returns
1348 * zero on success. In case of failure, a negative error code is returned.
1349 */
1350 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
1351 const struct inode *old_inode,
1352 const struct fscrypt_name *old_nm,
1353 const struct inode *new_dir,
1354 const struct inode *new_inode,
1355 const struct fscrypt_name *new_nm,
1356 const struct inode *whiteout, int sync, int delete_orphan)
1357 {
1358 void *p;
1359 union ubifs_key key;
1360 struct ubifs_dent_node *dent, *dent2;
1361 int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
1362 int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
1363 int last_reference = !!(new_inode && new_inode->i_nlink == 0);
1364 int move = (old_dir != new_dir);
1365 struct ubifs_inode *new_ui, *whiteout_ui;
1366 u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
1367 u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
1368 u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
1369 u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
1370 u8 hash_dent1[UBIFS_HASH_ARR_SZ];
1371 u8 hash_dent2[UBIFS_HASH_ARR_SZ];
1372
1373 ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
1374 ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
1375 ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
1376 ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
1377
1378 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
1379 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
1380 if (new_inode) {
1381 new_ui = ubifs_inode(new_inode);
1382 ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
1383 ilen = UBIFS_INO_NODE_SZ;
1384 if (!last_reference)
1385 ilen += new_ui->data_len;
1386 } else
1387 ilen = 0;
1388
1389 if (whiteout) {
1390 whiteout_ui = ubifs_inode(whiteout);
1391 ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
1392 ubifs_assert(c, whiteout->i_nlink == 1);
1393 ubifs_assert(c, !whiteout_ui->dirty);
1394 wlen = UBIFS_INO_NODE_SZ;
1395 wlen += whiteout_ui->data_len;
1396 } else
1397 wlen = 0;
1398
1399 aligned_dlen1 = ALIGN(dlen1, 8);
1400 aligned_dlen2 = ALIGN(dlen2, 8);
1401 len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
1402 ALIGN(wlen, 8) + ALIGN(plen, 8);
1403 if (move)
1404 len += plen;
1405
1406 len += ubifs_auth_node_sz(c);
1407
1408 dent = kzalloc(len, GFP_NOFS);
1409 if (!dent)
1410 return -ENOMEM;
1411
1412 /* Make reservation before allocating sequence numbers */
1413 err = make_reservation(c, BASEHD, len);
1414 if (err)
1415 goto out_free;
1416
1417 /* Make new dent */
1418 dent->ch.node_type = UBIFS_DENT_NODE;
1419 dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
1420 dent->inum = cpu_to_le64(old_inode->i_ino);
1421 dent->type = get_dent_type(old_inode->i_mode);
1422 dent->nlen = cpu_to_le16(fname_len(new_nm));
1423 memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
1424 dent->name[fname_len(new_nm)] = '\0';
1425 set_dent_cookie(c, dent);
1426 zero_dent_node_unused(dent);
1427 ubifs_prep_grp_node(c, dent, dlen1, 0);
1428 err = ubifs_node_calc_hash(c, dent, hash_dent1);
1429 if (err)
1430 goto out_release;
1431
1432 dent2 = (void *)dent + aligned_dlen1;
1433 dent2->ch.node_type = UBIFS_DENT_NODE;
1434 dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);
1435
1436 if (whiteout) {
1437 dent2->inum = cpu_to_le64(whiteout->i_ino);
1438 dent2->type = get_dent_type(whiteout->i_mode);
1439 } else {
1440 /* Make deletion dent */
1441 dent2->inum = 0;
1442 dent2->type = DT_UNKNOWN;
1443 }
1444 dent2->nlen = cpu_to_le16(fname_len(old_nm));
1445 memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
1446 dent2->name[fname_len(old_nm)] = '\0';
1447 set_dent_cookie(c, dent2);
1448 zero_dent_node_unused(dent2);
1449 ubifs_prep_grp_node(c, dent2, dlen2, 0);
1450 err = ubifs_node_calc_hash(c, dent2, hash_dent2);
1451 if (err)
1452 goto out_release;
1453
1454 p = (void *)dent2 + aligned_dlen2;
1455 if (new_inode) {
1456 pack_inode(c, p, new_inode, 0);
1457 err = ubifs_node_calc_hash(c, p, hash_new_inode);
1458 if (err)
1459 goto out_release;
1460
1461 p += ALIGN(ilen, 8);
1462 }
1463
1464 if (whiteout) {
1465 pack_inode(c, p, whiteout, 0);
1466 err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
1467 if (err)
1468 goto out_release;
1469
1470 p += ALIGN(wlen, 8);
1471 }
1472
1473 if (!move) {
1474 pack_inode(c, p, old_dir, 1);
1475 err = ubifs_node_calc_hash(c, p, hash_old_dir);
1476 if (err)
1477 goto out_release;
1478 } else {
1479 pack_inode(c, p, old_dir, 0);
1480 err = ubifs_node_calc_hash(c, p, hash_old_dir);
1481 if (err)
1482 goto out_release;
1483
1484 p += ALIGN(plen, 8);
1485 pack_inode(c, p, new_dir, 1);
1486 err = ubifs_node_calc_hash(c, p, hash_new_dir);
1487 if (err)
1488 goto out_release;
1489 }
1490
1491 if (last_reference) {
1492 err = ubifs_add_orphan(c, new_inode->i_ino);
1493 if (err) {
1494 release_head(c, BASEHD);
1495 goto out_finish;
1496 }
1497 new_ui->del_cmtno = c->cmt_no;
1498 orphan_added = 1;
1499 }
1500
1501 err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
1502 if (err)
1503 goto out_release;
1504 if (!sync) {
1505 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1506
1507 ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
1508 ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
1509 if (new_inode)
1510 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1511 new_inode->i_ino);
1512 if (whiteout)
1513 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
1514 whiteout->i_ino);
1515 }
1516 release_head(c, BASEHD);
1517
1518 ubifs_add_auth_dirt(c, lnum);
1519
1520 dent_key_init(c, &key, new_dir->i_ino, new_nm);
1521 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm);
1522 if (err)
1523 goto out_ro;
1524
1525 offs += aligned_dlen1;
1526 if (whiteout) {
1527 dent_key_init(c, &key, old_dir->i_ino, old_nm);
1528 err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
1529 if (err)
1530 goto out_ro;
1531 } else {
1532 err = ubifs_add_dirt(c, lnum, dlen2);
1533 if (err)
1534 goto out_ro;
1535
1536 dent_key_init(c, &key, old_dir->i_ino, old_nm);
1537 err = ubifs_tnc_remove_nm(c, &key, old_nm);
1538 if (err)
1539 goto out_ro;
1540 }
1541
1542 offs += aligned_dlen2;
1543 if (new_inode) {
1544 ino_key_init(c, &key, new_inode->i_ino);
1545 err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode);
1546 if (err)
1547 goto out_ro;
1548 offs += ALIGN(ilen, 8);
1549 }
1550
1551 if (whiteout) {
1552 ino_key_init(c, &key, whiteout->i_ino);
1553 err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
1554 hash_whiteout_inode);
1555 if (err)
1556 goto out_ro;
1557 offs += ALIGN(wlen, 8);
1558 }
1559
1560 ino_key_init(c, &key, old_dir->i_ino);
1561 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
1562 if (err)
1563 goto out_ro;
1564
1565 if (move) {
1566 offs += ALIGN(plen, 8);
1567 ino_key_init(c, &key, new_dir->i_ino);
1568 err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir);
1569 if (err)
1570 goto out_ro;
1571 }
1572
1573 if (delete_orphan)
1574 ubifs_delete_orphan(c, whiteout->i_ino);
1575
1576 finish_reservation(c);
1577 if (new_inode) {
1578 mark_inode_clean(c, new_ui);
1579 spin_lock(&new_ui->ui_lock);
1580 new_ui->synced_i_size = new_ui->ui_size;
1581 spin_unlock(&new_ui->ui_lock);
1582 }
1583 /*
1584 * There is no need to mark the whiteout inode clean.
1585 * A whiteout never has a non-zero size, so there is no need to update
1586 * synced_i_size for whiteout_ui.
1587 */
1588 mark_inode_clean(c, ubifs_inode(old_dir));
1589 if (move)
1590 mark_inode_clean(c, ubifs_inode(new_dir));
1591 kfree(dent);
1592 return 0;
1593
1594 out_release:
1595 release_head(c, BASEHD);
1596 out_ro:
1597 ubifs_ro_mode(c, err);
1598 if (orphan_added)
1599 ubifs_delete_orphan(c, new_inode->i_ino);
1600 out_finish:
1601 finish_reservation(c);
1602 out_free:
1603 kfree(dent);
1604 return err;
1605 }
1606
1607 /**
1608 * truncate_data_node - re-compress/encrypt a truncated data node.
1609 * @c: UBIFS file-system description object
1610 * @inode: inode which refers to the data node
1611 * @block: data block number
1612 * @dn: data node to re-compress
1613 * @new_len: new length
1614 * @dn_size: size of the data node @dn in memory
1615 *
1616 * This function is used when an inode is truncated and the last data node of
1617 * the inode has to be re-compressed/encrypted and re-written.
1618 */
1619 static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
1620 unsigned int block, struct ubifs_data_node *dn,
1621 int *new_len, int dn_size)
1622 {
1623 void *buf;
1624 int err, dlen, compr_type, out_len, data_size;
1625
1626 out_len = le32_to_cpu(dn->size);
1627 buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
1628 if (!buf)
1629 return -ENOMEM;
1630
1631 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
1632 data_size = dn_size - UBIFS_DATA_NODE_SZ;
1633 compr_type = le16_to_cpu(dn->compr_type);
1634
1635 if (IS_ENCRYPTED(inode)) {
1636 err = ubifs_decrypt(inode, dn, &dlen, block);
1637 if (err)
1638 goto out;
1639 }
1640
1641 if (compr_type == UBIFS_COMPR_NONE) {
1642 out_len = *new_len;
1643 } else {
1644 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
1645 if (err)
1646 goto out;
1647
1648 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
1649 }
1650
1651 if (IS_ENCRYPTED(inode)) {
1652 err = ubifs_encrypt(inode, dn, out_len, &data_size, block);
1653 if (err)
1654 goto out;
1655
1656 out_len = data_size;
1657 } else {
1658 dn->compr_size = 0;
1659 }
1660
1661 ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
1662 dn->compr_type = cpu_to_le16(compr_type);
1663 dn->size = cpu_to_le32(*new_len);
1664 *new_len = UBIFS_DATA_NODE_SZ + out_len;
1665 err = 0;
1666 out:
1667 kfree(buf);
1668 return err;
1669 }
1670
1671 /**
1672 * ubifs_jnl_truncate - update the journal for a truncation.
1673 * @c: UBIFS file-system description object
1674 * @inode: inode to truncate
1675 * @old_size: old size
1676 * @new_size: new size
1677 *
1678 * When the size of a file decreases due to truncation, a truncation node is
1679 * written, the journal tree is updated, and the last data block is re-written
1680 * if it has been affected. The inode is also updated in order to synchronize
1681 * the new inode size.
1682 *
1683 * This function marks the inode as clean and returns zero on success. In case
1684 * of failure, a negative error code is returned.
1685 */
1686 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1687 loff_t old_size, loff_t new_size)
1688 {
1689 union ubifs_key key, to_key;
1690 struct ubifs_ino_node *ino;
1691 struct ubifs_trun_node *trun;
1692 struct ubifs_data_node *dn;
1693 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1694 int dn_size;
1695 struct ubifs_inode *ui = ubifs_inode(inode);
1696 ino_t inum = inode->i_ino;
1697 unsigned int blk;
1698 u8 hash_ino[UBIFS_HASH_ARR_SZ];
1699 u8 hash_dn[UBIFS_HASH_ARR_SZ];
1700
1701 dbg_jnl("ino %lu, size %lld -> %lld",
1702 (unsigned long)inum, old_size, new_size);
1703 ubifs_assert(c, !ui->data_len);
1704 ubifs_assert(c, S_ISREG(inode->i_mode));
1705 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
1706
1707 dn_size = COMPRESSED_DATA_NODE_BUF_SZ;
1708
1709 if (IS_ENCRYPTED(inode))
1710 dn_size += UBIFS_CIPHER_BLOCK_SIZE;
1711
1712 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
1713 dn_size + ubifs_auth_node_sz(c);
1714
1715 ino = kmalloc(sz, GFP_NOFS);
1716 if (!ino)
1717 return -ENOMEM;
1718
1719 trun = (void *)ino + UBIFS_INO_NODE_SZ;
1720 trun->ch.node_type = UBIFS_TRUN_NODE;
1721 trun->inum = cpu_to_le32(inum);
1722 trun->old_size = cpu_to_le64(old_size);
1723 trun->new_size = cpu_to_le64(new_size);
1724 zero_trun_node_unused(trun);
1725
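	/*
	 * If the new size is not block-aligned, the last data block is only
	 * partially cut off. 'dlen' starts out as the number of bytes which
	 * survive in that block; if the block has to be re-written,
	 * 'truncate_data_node()' turns it into the on-flash length of the
	 * re-written data node.
	 */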
1726 dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1727 if (dlen) {
1728 /* Get last data block so it can be truncated */
1729 dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1730 blk = new_size >> UBIFS_BLOCK_SHIFT;
1731 data_key_init(c, &key, inum, blk);
1732 dbg_jnlk(&key, "last block key ");
1733 err = ubifs_tnc_lookup(c, &key, dn);
1734 if (err == -ENOENT)
1735 dlen = 0; /* Not found (so it is a hole) */
1736 else if (err)
1737 goto out_free;
1738 else {
1739 int dn_len = le32_to_cpu(dn->size);
1740
1741 if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1742 ubifs_err(c, "bad data node (block %u, inode %lu)",
1743 blk, inode->i_ino);
1744 ubifs_dump_node(c, dn, dn_size);
1745 err = -EUCLEAN;
1746 goto out_free;
1747 }
1748
1749 if (dn_len <= dlen)
1750 dlen = 0; /* Nothing to do */
1751 else {
1752 err = truncate_data_node(c, inode, blk, dn,
1753 &dlen, dn_size);
1754 if (err)
1755 goto out_free;
1756 }
1757 }
1758 }
1759
1760 /* Must make reservation before allocating sequence numbers */
1761 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
1762
1763 if (ubifs_authenticated(c))
1764 len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c);
1765 else
1766 len += dlen;
1767
1768 err = make_reservation(c, BASEHD, len);
1769 if (err)
1770 goto out_free;
1771
1772 pack_inode(c, ino, inode, 0);
1773 err = ubifs_node_calc_hash(c, ino, hash_ino);
1774 if (err)
1775 goto out_release;
1776
1777 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
1778 if (dlen) {
1779 ubifs_prep_grp_node(c, dn, dlen, 1);
1780 err = ubifs_node_calc_hash(c, dn, hash_dn);
1781 if (err)
1782 goto out_release;
1783 }
1784
1785 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1786 if (err)
1787 goto out_release;
1788 if (!sync)
1789 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1790 release_head(c, BASEHD);
1791
1792 ubifs_add_auth_dirt(c, lnum);
1793
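	/*
	 * If the last data block was re-written, its node follows the inode
	 * and truncation nodes in the group, so add it to the TNC at that
	 * offset.
	 */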
1794 if (dlen) {
1795 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
1796 err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
1797 if (err)
1798 goto out_ro;
1799 }
1800
1801 ino_key_init(c, &key, inum);
1802 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
1803 if (err)
1804 goto out_ro;
1805
1806 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1807 if (err)
1808 goto out_ro;
1809
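	/*
	 * Remove from the TNC all data blocks which lie entirely beyond the
	 * new size: from the first block past the (possibly partial) last
	 * block up to and including the last block covered by the old size.
	 */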
1810 bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1811 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1812 data_key_init(c, &key, inum, blk);
1813
1814 bit = old_size & (UBIFS_BLOCK_SIZE - 1);
1815 blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
1816 data_key_init(c, &to_key, inum, blk);
1817
1818 err = ubifs_tnc_remove_range(c, &key, &to_key);
1819 if (err)
1820 goto out_ro;
1821
1822 finish_reservation(c);
1823 spin_lock(&ui->ui_lock);
1824 ui->synced_i_size = ui->ui_size;
1825 spin_unlock(&ui->ui_lock);
1826 mark_inode_clean(c, ui);
1827 kfree(ino);
1828 return 0;
1829
1830 out_release:
1831 release_head(c, BASEHD);
1832 out_ro:
1833 ubifs_ro_mode(c, err);
1834 finish_reservation(c);
1835 out_free:
1836 kfree(ino);
1837 return err;
1838 }
1839
1840
1841 /**
1842 * ubifs_jnl_delete_xattr - delete an extended attribute.
1843 * @c: UBIFS file-system description object
1844 * @host: host inode
1845 * @inode: extended attribute inode
1846 * @nm: extended attribute entry name
1847 *
1848 * This function deletes an extended attribute, which is very similar to
1849 * un-linking regular files - it writes a deletion xentry, a deletion inode and
1850 * updates the host inode. Returns zero in case of success and a negative
1851 * error code in case of failure.
1852 */
1853 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
1854 const struct inode *inode,
1855 const struct fscrypt_name *nm)
1856 {
1857 int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len;
1858 struct ubifs_dent_node *xent;
1859 struct ubifs_ino_node *ino;
1860 union ubifs_key xent_key, key1, key2;
1861 int sync = IS_DIRSYNC(host);
1862 struct ubifs_inode *host_ui = ubifs_inode(host);
1863 u8 hash[UBIFS_HASH_ARR_SZ];
1864
1865 ubifs_assert(c, inode->i_nlink == 0);
1866 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
1867
1868 /*
1869 * Since we are deleting the inode, we do not bother to attach any data
1870 * to it and assume its length is %UBIFS_INO_NODE_SZ.
1871 */
1872 xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
1873 aligned_xlen = ALIGN(xlen, 8);
1874 hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
1875 len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
1876
1877 write_len = len + ubifs_auth_node_sz(c);
1878
1879 xent = kzalloc(write_len, GFP_NOFS);
1880 if (!xent)
1881 return -ENOMEM;
1882
1883 /* Make reservation before allocating sequence numbers */
1884 err = make_reservation(c, BASEHD, write_len);
1885 if (err) {
1886 kfree(xent);
1887 return err;
1888 }
1889
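	/*
	 * A deletion xentry looks like an ordinary xentry, except that its
	 * target inode number is zero.
	 */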
1890 xent->ch.node_type = UBIFS_XENT_NODE;
1891 xent_key_init(c, &xent_key, host->i_ino, nm);
1892 key_write(c, &xent_key, xent->key);
1893 xent->inum = 0;
1894 xent->type = get_dent_type(inode->i_mode);
1895 xent->nlen = cpu_to_le16(fname_len(nm));
1896 memcpy(xent->name, fname_name(nm), fname_len(nm));
1897 xent->name[fname_len(nm)] = '\0';
1898 zero_dent_node_unused(xent);
1899 ubifs_prep_grp_node(c, xent, xlen, 0);
1900
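	/*
	 * Pack the deleted extended attribute inode right after the xentry,
	 * followed by the updated host inode, which closes the node group and
	 * whose hash is added to the TNC below.
	 */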
1901 ino = (void *)xent + aligned_xlen;
1902 pack_inode(c, ino, inode, 0);
1903 ino = (void *)ino + UBIFS_INO_NODE_SZ;
1904 pack_inode(c, ino, host, 1);
1905 err = ubifs_node_calc_hash(c, ino, hash);
1906 if (err)
1907 goto out_release;
1908
1909 err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
1910 if (!sync && !err)
1911 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
1912 release_head(c, BASEHD);
1913
1914 ubifs_add_auth_dirt(c, lnum);
1915 kfree(xent);
1916 if (err)
1917 goto out_ro;
1918
1919 /* Remove the extended attribute entry from TNC */
1920 err = ubifs_tnc_remove_nm(c, &xent_key, nm);
1921 if (err)
1922 goto out_ro;
1923 err = ubifs_add_dirt(c, lnum, xlen);
1924 if (err)
1925 goto out_ro;
1926
1927 /*
1928 * Remove all nodes belonging to the extended attribute inode from TNC.
1929 * Well, there actually must be only one node - the inode itself.
1930 */
1931 lowest_ino_key(c, &key1, inode->i_ino);
1932 highest_ino_key(c, &key2, inode->i_ino);
1933 err = ubifs_tnc_remove_range(c, &key1, &key2);
1934 if (err)
1935 goto out_ro;
1936 err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
1937 if (err)
1938 goto out_ro;
1939
1940 /* And update TNC with the new host inode position */
1941 ino_key_init(c, &key1, host->i_ino);
1942 err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
1943 if (err)
1944 goto out_ro;
1945
1946 finish_reservation(c);
1947 spin_lock(&host_ui->ui_lock);
1948 host_ui->synced_i_size = host_ui->ui_size;
1949 spin_unlock(&host_ui->ui_lock);
1950 mark_inode_clean(c, host_ui);
1951 return 0;
1952
1953 out_release:
1954 kfree(xent);
1955 release_head(c, BASEHD);
1956 out_ro:
1957 ubifs_ro_mode(c, err);
1958 finish_reservation(c);
1959 return err;
1960 }
1961
1962 /**
1963 * ubifs_jnl_change_xattr - change an extended attribute.
1964 * @c: UBIFS file-system description object
1965 * @inode: extended attribute inode
1966 * @host: host inode
1967 *
1968 * This function writes the updated version of an extended attribute inode and
1969 * the host inode to the journal (to the base head). The host inode is written
1970 * after the extended attribute inode in order to guarantee that the extended
1971 * attribute is flushed when the host inode is synchronized by 'fsync()' and,
1972 * consequently, the write-buffer is synchronized. This function returns zero
1973 * in case of success and a negative error code in case of failure.
1974 */
1975 int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1976 const struct inode *host)
1977 {
1978 int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
1979 struct ubifs_inode *host_ui = ubifs_inode(host);
1980 struct ubifs_ino_node *ino;
1981 union ubifs_key key;
1982 int sync = IS_DIRSYNC(host);
1983 u8 hash_host[UBIFS_HASH_ARR_SZ];
1984 u8 hash[UBIFS_HASH_ARR_SZ];
1985
1986 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
1987 ubifs_assert(c, inode->i_nlink > 0);
1988 ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
1989
1990 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1991 len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1992 aligned_len1 = ALIGN(len1, 8);
1993 aligned_len = aligned_len1 + ALIGN(len2, 8);
1994
1995 aligned_len += ubifs_auth_node_sz(c);
1996
1997 ino = kzalloc(aligned_len, GFP_NOFS);
1998 if (!ino)
1999 return -ENOMEM;
2000
2001 /* Make reservation before allocating sequence numbers */
2002 err = make_reservation(c, BASEHD, aligned_len);
2003 if (err)
2004 goto out_free;
2005
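	/*
	 * The host inode is packed first and the extended attribute inode
	 * right after it; the xattr inode ends the node group.
	 */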
2006 pack_inode(c, ino, host, 0);
2007 err = ubifs_node_calc_hash(c, ino, hash_host);
2008 if (err)
2009 goto out_release;
2010 pack_inode(c, (void *)ino + aligned_len1, inode, 1);
2011 err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
2012 if (err)
2013 goto out_release;
2014
2015 err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
2016 if (!sync && !err) {
2017 struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
2018
2019 ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
2020 ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
2021 }
2022 release_head(c, BASEHD);
2023 if (err)
2024 goto out_ro;
2025
2026 ubifs_add_auth_dirt(c, lnum);
2027
2028 ino_key_init(c, &key, host->i_ino);
2029 err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
2030 if (err)
2031 goto out_ro;
2032
2033 ino_key_init(c, &key, inode->i_ino);
2034 err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
2035 if (err)
2036 goto out_ro;
2037
2038 finish_reservation(c);
2039 spin_lock(&host_ui->ui_lock);
2040 host_ui->synced_i_size = host_ui->ui_size;
2041 spin_unlock(&host_ui->ui_lock);
2042 mark_inode_clean(c, host_ui);
2043 kfree(ino);
2044 return 0;
2045
2046 out_release:
2047 release_head(c, BASEHD);
2048 out_ro:
2049 ubifs_ro_mode(c, err);
2050 finish_reservation(c);
2051 out_free:
2052 kfree(ino);
2053 return err;
2054 }
2055
2056